diff --git "a/test_sharegpt.json" "b/test_sharegpt.json" new file mode 100644--- /dev/null +++ "b/test_sharegpt.json" @@ -0,0 +1,246901 @@ +[ + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. approach\nC. transfer\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. approach" + } + ], + "images": [ + "images/task_00001_7_122_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00001", + "split": "eval", + "split_group_id": "task_00001__episode_000007", + "recording_id": null, + "episode_id": 7, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. release\nC. transfer\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. transfer" + } + ], + "images": [ + "images/task_00001_18_478_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00001", + "split": "eval", + "split_group_id": "task_00001__episode_000018", + "recording_id": null, + "episode_id": 18, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. transfer\nC. approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
transfer" + } + ], + "images": [ + "images/task_00001_23_395_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00001", + "split": "eval", + "split_group_id": "task_00001__episode_000023", + "recording_id": null, + "episode_id": 23, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the right side of the scene\nA. toward the bottom of the scene\nC. toward the top of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. toward the right side of the scene" + } + ], + "images": [ + "images/task_00001_28_422_camera_top.jpg", + "images/task_00001_28_427_camera_top.jpg", + "images/task_00001_28_432_camera_top.jpg", + "images/task_00001_28_437_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00001", + "split": "eval", + "split_group_id": "task_00001__episode_000028", + "recording_id": null, + "episode_id": 28, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. release\nC. transfer\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
approach" + } + ], + "images": [ + "images/task_00001_52_131_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00001", + "split": "eval", + "split_group_id": "task_00001__episode_000052", + "recording_id": null, + "episode_id": 52, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nC. toward the right side of the scene\nA. toward the top of the scene\nB. toward the bottom of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. toward the right side of the scene" + } + ], + "images": [ + "images/task_00001_73_217_camera_top.jpg", + "images/task_00001_73_222_camera_top.jpg", + "images/task_00001_73_227_camera_top.jpg", + "images/task_00001_73_232_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00001", + "split": "eval", + "split_group_id": "task_00001__episode_000073", + "recording_id": null, + "episode_id": 73, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. pre-approach\nC. approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
transfer" + } + ], + "images": [ + "images/task_00001_81_248_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00001", + "split": "eval", + "split_group_id": "task_00001__episode_000081", + "recording_id": null, + "episode_id": 81, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. release\nC. transfer\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. release" + } + ], + "images": [ + "images/task_00001_86_509_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00001", + "split": "eval", + "split_group_id": "task_00001__episode_000086", + "recording_id": null, + "episode_id": 86, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. release\nC. pre-approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. approach" + } + ], + "images": [ + "images/task_00001_94_141_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00001", + "split": "eval", + "split_group_id": "task_00001__episode_000094", + "recording_id": null, + "episode_id": 94, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the top of the scene\nB. toward the left side of the scene\nC. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00001_100_301_camera_top.jpg", + "images/task_00001_100_306_camera_top.jpg", + "images/task_00001_100_311_camera_top.jpg", + "images/task_00001_100_316_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00001", + "split": "eval", + "split_group_id": "task_00001__episode_000100", + "recording_id": null, + "episode_id": 100, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. pre-approach\nC. approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. pre-approach" + } + ], + "images": [ + "images/task_00001_102_416_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00001", + "split": "eval", + "split_group_id": "task_00001__episode_000102", + "recording_id": null, + "episode_id": 102, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. pre-approach\nC. transfer\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. transfer" + } + ], + "images": [ + "images/task_00001_113_366_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00001", + "split": "eval", + "split_group_id": "task_00001__episode_000113", + "recording_id": null, + "episode_id": 113, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. 
Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00002_10_952_camera_top_t-6.jpg", + "images/task_00002_10_955_camera_top_t-3.jpg", + "images/task_00002_10_958_camera_top_t0.jpg", + "images/task_00002_10_961_camera_top_t+3.jpg", + "images/task_00002_10_964_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00002", + "split": "eval", + "split_group_id": "task_00002__episode_000010", + "recording_id": null, + "episode_id": 10, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. only the left arm is active" + } + ], + "images": [ + "images/task_00002_10_427_camera_top.jpg", + "images/task_00002_10_430_camera_top.jpg", + "images/task_00002_10_433_camera_top.jpg", + "images/task_00002_10_436_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00002", + "split": "eval", + "split_group_id": "task_00002__episode_000010", + "recording_id": null, + "episode_id": 10, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "A. both arms are active" + } + ], + "images": [ + "images/task_00002_10_1508_camera_top.jpg", + "images/task_00002_10_1511_camera_top.jpg", + "images/task_00002_10_1514_camera_top.jpg", + "images/task_00002_10_1517_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00002", + "split": "eval", + "split_group_id": "task_00002__episode_000010", + "recording_id": null, + "episode_id": 10, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_00002_10_265_camera_top.jpg", + "images/task_00002_10_138_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00002", + "split": "eval", + "split_group_id": "task_00002__episode_000010", + "recording_id": null, + "episode_id": 10, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — no contact" + } + ], + "images": [ + "images/task_00002_10_108_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00002", + "split": "eval", + "split_group_id": "task_00002__episode_000010", + "recording_id": null, + "episode_id": 10, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_00002_10_303_camera_top.jpg", + "images/task_00002_10_166_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00002", + "split": "eval", + "split_group_id": "task_00002__episode_000010", + "recording_id": null, + "episode_id": 10, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. transfer\nC. hold and carry\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
transfer" + } + ], + "images": [ + "images/task_00002_10_643_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00002", + "split": "eval", + "split_group_id": "task_00002__episode_000010", + "recording_id": null, + "episode_id": 10, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_00002_10_812_camera_top.jpg", + "images/task_00002_10_142_camera_top.jpg", + "images/task_00002_10_319_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00002", + "split": "eval", + "split_group_id": "task_00002__episode_000010", + "recording_id": null, + "episode_id": 10, + "camera": "camera_top", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_00002_10_674_camera_top.jpg", + "images/task_00002_10_180_camera_top.jpg", + "images/task_00002_10_1566_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00002", + "split": "eval", + "split_group_id": "task_00002__episode_000010", + "recording_id": null, + "episode_id": 10, + "camera": "camera_top", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_00002_21_164_camera_top.jpg", + "images/task_00002_21_1132_camera_top.jpg", + "images/task_00002_21_682_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00002", + "split": "eval", + "split_group_id": "task_00002__episode_000021", + "recording_id": null, + "episode_id": 21, + "camera": "camera_top", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the top of the scene\nB. toward the bottom of the scene\nC. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. toward the right side of the scene" + } + ], + "images": [ + "images/task_00002_21_195_camera_top.jpg", + "images/task_00002_21_200_camera_top.jpg", + "images/task_00002_21_205_camera_top.jpg", + "images/task_00002_21_210_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00002", + "split": "eval", + "split_group_id": "task_00002__episode_000021", + "recording_id": null, + "episode_id": 21, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_00002_21_865_camera_top.jpg", + "images/task_00002_21_167_camera_top.jpg", + "images/task_00002_21_1170_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00002", + "split": "eval", + "split_group_id": "task_00002__episode_000021", + "recording_id": null, + "episode_id": 21, + "camera": "camera_top", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. both arms are active" + } + ], + "images": [ + "images/task_00002_21_128_camera_top.jpg", + "images/task_00002_21_131_camera_top.jpg", + "images/task_00002_21_134_camera_top.jpg", + "images/task_00002_21_137_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00002", + "split": "eval", + "split_group_id": "task_00002__episode_000021", + "recording_id": null, + "episode_id": 21, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — no contact" + } + ], + "images": [ + "images/task_00002_21_1145_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00002", + "split": "eval", + "split_group_id": "task_00002__episode_000021", + "recording_id": null, + "episode_id": 21, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. only the right arm is active" + } + ], + "images": [ + "images/task_00002_21_1200_camera_top.jpg", + "images/task_00002_21_1203_camera_top.jpg", + "images/task_00002_21_1206_camera_top.jpg", + "images/task_00002_21_1209_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00002", + "split": "eval", + "split_group_id": "task_00002__episode_000021", + "recording_id": null, + "episode_id": 21, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00002_21_497_camera_top.jpg", + "images/task_00002_21_500_camera_top.jpg", + "images/task_00002_21_503_camera_top.jpg", + "images/task_00002_21_506_camera_top.jpg", + "images/task_00002_21_509_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00002", + "split": "eval", + "split_group_id": "task_00002__episode_000021", + "recording_id": null, + "episode_id": 21, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_00002_21_1204_camera_top.jpg", + "images/task_00002_21_340_camera_top.jpg", + "images/task_00002_21_167_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00002", + "split": "eval", + "split_group_id": "task_00002__episode_000021", + "recording_id": null, + "episode_id": 21, + "camera": "camera_top", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
only the right arm is active" + } + ], + "images": [ + "images/task_00002_21_254_camera_top.jpg", + "images/task_00002_21_257_camera_top.jpg", + "images/task_00002_21_260_camera_top.jpg", + "images/task_00002_21_263_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00002", + "split": "eval", + "split_group_id": "task_00002__episode_000021", + "recording_id": null, + "episode_id": 21, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00002_21_238_camera_top.jpg", + "images/task_00002_21_241_camera_top.jpg", + "images/task_00002_21_244_camera_top.jpg", + "images/task_00002_21_247_camera_top.jpg", + "images/task_00002_21_250_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00002", + "split": "eval", + "split_group_id": "task_00002__episode_000021", + "recording_id": null, + "episode_id": 21, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. release\nC. hold and carry\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
pre-approach" + } + ], + "images": [ + "images/task_00002_21_81_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00002", + "split": "eval", + "split_group_id": "task_00002__episode_000021", + "recording_id": null, + "episode_id": 21, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. both arms are active" + } + ], + "images": [ + "images/task_00002_21_1165_camera_top.jpg", + "images/task_00002_21_1168_camera_top.jpg", + "images/task_00002_21_1171_camera_top.jpg", + "images/task_00002_21_1174_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00002", + "split": "eval", + "split_group_id": "task_00002__episode_000021", + "recording_id": null, + "episode_id": 21, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00002_100_219_camera_top.jpg", + "images/task_00002_100_222_camera_top.jpg", + "images/task_00002_100_225_camera_top.jpg", + "images/task_00002_100_228_camera_top.jpg", + "images/task_00002_100_231_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00002", + "split": "eval", + "split_group_id": "task_00002__episode_000100", + "recording_id": null, + "episode_id": 100, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_00002_100_396_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00002", + "split": "eval", + "split_group_id": "task_00002__episode_000100", + "recording_id": null, + "episode_id": 100, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/task_00002_100_609_camera_top.jpg", + "images/task_00002_100_398_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00002", + "split": "eval", + "split_group_id": "task_00002__episode_000100", + "recording_id": null, + "episode_id": 100, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00002_100_295_camera_top_t-6.jpg", + "images/task_00002_100_298_camera_top_t-3.jpg", + "images/task_00002_100_301_camera_top_t0.jpg", + "images/task_00002_100_304_camera_top_t+3.jpg", + "images/task_00002_100_307_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00002", + "split": "eval", + "split_group_id": "task_00002__episode_000100", + "recording_id": null, + "episode_id": 100, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00002_100_945_camera_top.jpg", + "images/task_00002_100_948_camera_top.jpg", + "images/task_00002_100_951_camera_top.jpg", + "images/task_00002_100_954_camera_top.jpg", + "images/task_00002_100_957_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00002", + "split": "eval", + "split_group_id": "task_00002__episode_000100", + "recording_id": null, + "episode_id": 100, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. only the right arm is active" + } + ], + "images": [ + "images/task_00002_100_112_camera_top.jpg", + "images/task_00002_100_115_camera_top.jpg", + "images/task_00002_100_118_camera_top.jpg", + "images/task_00002_100_121_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00002", + "split": "eval", + "split_group_id": "task_00002__episode_000100", + "recording_id": null, + "episode_id": 100, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
only the right arm is active" + } + ], + "images": [ + "images/task_00002_100_204_camera_top.jpg", + "images/task_00002_100_207_camera_top.jpg", + "images/task_00002_100_210_camera_top.jpg", + "images/task_00002_100_213_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00002", + "split": "eval", + "split_group_id": "task_00002__episode_000100", + "recording_id": null, + "episode_id": 100, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nC. toward the top of the scene\nA. toward the right side of the scene\nB. toward the left side of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. toward the top of the scene" + } + ], + "images": [ + "images/task_00002_100_1588_camera_top.jpg", + "images/task_00002_100_1593_camera_top.jpg", + "images/task_00002_100_1598_camera_top.jpg", + "images/task_00002_100_1603_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00002", + "split": "eval", + "split_group_id": "task_00002__episode_000100", + "recording_id": null, + "episode_id": 100, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. hold and carry\nC. release\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
release" + } + ], + "images": [ + "images/task_00002_100_1681_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00002", + "split": "eval", + "split_group_id": "task_00002__episode_000100", + "recording_id": null, + "episode_id": 100, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. both arms are active" + } + ], + "images": [ + "images/task_00002_100_772_camera_top.jpg", + "images/task_00002_100_775_camera_top.jpg", + "images/task_00002_100_778_camera_top.jpg", + "images/task_00002_100_781_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00002", + "split": "eval", + "split_group_id": "task_00002__episode_000100", + "recording_id": null, + "episode_id": 100, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — contact established" + } + ], + "images": [ + "images/task_00002_100_1151_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00002", + "split": "eval", + "split_group_id": "task_00002__episode_000100", + "recording_id": null, + "episode_id": 100, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. only the left arm is active" + } + ], + "images": [ + "images/task_00002_100_1491_camera_top.jpg", + "images/task_00002_100_1494_camera_top.jpg", + "images/task_00002_100_1497_camera_top.jpg", + "images/task_00002_100_1500_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00002", + "split": "eval", + "split_group_id": "task_00002__episode_000100", + "recording_id": null, + "episode_id": 100, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00002_100_198_camera_top.jpg", + "images/task_00002_100_201_camera_top.jpg", + "images/task_00002_100_204_camera_top.jpg", + "images/task_00002_100_207_camera_top.jpg", + "images/task_00002_100_210_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00002", + "split": "eval", + "split_group_id": "task_00002__episode_000100", + "recording_id": null, + "episode_id": 100, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_00002_102_1575_camera_top.jpg", + "images/task_00002_102_1414_camera_top.jpg", + "images/task_00002_102_400_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00002", + "split": "eval", + "split_group_id": "task_00002__episode_000102", + "recording_id": null, + "episode_id": 102, + "camera": "camera_top", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_00002_102_537_camera_top.jpg", + "images/task_00002_102_692_camera_top.jpg", + "images/task_00002_102_1688_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00002", + "split": "eval", + "split_group_id": "task_00002__episode_000102", + "recording_id": null, + "episode_id": 102, + "camera": "camera_top", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00002_102_283_camera_top.jpg", + "images/task_00002_102_286_camera_top.jpg", + "images/task_00002_102_289_camera_top.jpg", + "images/task_00002_102_292_camera_top.jpg", + "images/task_00002_102_295_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00002", + "split": "eval", + "split_group_id": "task_00002__episode_000102", + "recording_id": null, + "episode_id": 102, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00002_102_1338_camera_top.jpg", + "images/task_00002_102_1341_camera_top.jpg", + "images/task_00002_102_1344_camera_top.jpg", + "images/task_00002_102_1347_camera_top.jpg", + "images/task_00002_102_1350_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00002", + "split": "eval", + "split_group_id": "task_00002__episode_000102", + "recording_id": null, + "episode_id": 102, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00002_102_1590_camera_top_t-6.jpg", + "images/task_00002_102_1593_camera_top_t-3.jpg", + "images/task_00002_102_1596_camera_top_t0.jpg", + "images/task_00002_102_1599_camera_top_t+3.jpg", + "images/task_00002_102_1602_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00002", + "split": "eval", + "split_group_id": "task_00002__episode_000102", + "recording_id": null, + "episode_id": 102, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — contact established" + } + ], + "images": [ + "images/task_00002_102_1267_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00002", + "split": "eval", + "split_group_id": "task_00002__episode_000102", + "recording_id": null, + "episode_id": 102, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. only the left arm is active" + } + ], + "images": [ + "images/task_00002_102_776_camera_top.jpg", + "images/task_00002_102_779_camera_top.jpg", + "images/task_00002_102_782_camera_top.jpg", + "images/task_00002_102_785_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00002", + "split": "eval", + "split_group_id": "task_00002__episode_000102", + "recording_id": null, + "episode_id": 102, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00002_102_514_camera_top_t-6.jpg", + "images/task_00002_102_517_camera_top_t-3.jpg", + "images/task_00002_102_520_camera_top_t0.jpg", + "images/task_00002_102_523_camera_top_t+3.jpg", + "images/task_00002_102_526_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00002", + "split": "eval", + "split_group_id": "task_00002__episode_000102", + "recording_id": null, + "episode_id": 102, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nD. toward the bottom of the scene\nA. toward the top of the scene\nB. toward the left side of the scene\nC. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. toward the bottom of the scene" + } + ], + "images": [ + "images/task_00002_102_596_camera_top.jpg", + "images/task_00002_102_601_camera_top.jpg", + "images/task_00002_102_606_camera_top.jpg", + "images/task_00002_102_611_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00002", + "split": "eval", + "split_group_id": "task_00002__episode_000102", + "recording_id": null, + "episode_id": 102, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the bottom of the scene\nB. toward the top of the scene\nC. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00002_129_1922_camera_top.jpg", + "images/task_00002_129_1927_camera_top.jpg", + "images/task_00002_129_1932_camera_top.jpg", + "images/task_00002_129_1937_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00002", + "split": "eval", + "split_group_id": "task_00002__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00002_129_1542_camera_top.jpg", + "images/task_00002_129_1545_camera_top.jpg", + "images/task_00002_129_1548_camera_top.jpg", + "images/task_00002_129_1551_camera_top.jpg", + "images/task_00002_129_1554_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00002", + "split": "eval", + "split_group_id": "task_00002__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
both arms are active" + } + ], + "images": [ + "images/task_00002_129_1106_camera_top.jpg", + "images/task_00002_129_1109_camera_top.jpg", + "images/task_00002_129_1112_camera_top.jpg", + "images/task_00002_129_1115_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00002", + "split": "eval", + "split_group_id": "task_00002__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_00002_129_1615_camera_top.jpg", + "images/task_00002_129_1903_camera_top.jpg", + "images/task_00002_129_1073_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00002", + "split": "eval", + "split_group_id": "task_00002__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00002_129_1759_camera_top.jpg", + "images/task_00002_129_1762_camera_top.jpg", + "images/task_00002_129_1765_camera_top.jpg", + "images/task_00002_129_1768_camera_top.jpg", + "images/task_00002_129_1771_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00002", + "split": "eval", + "split_group_id": "task_00002__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00002_129_155_camera_top.jpg", + "images/task_00002_129_158_camera_top.jpg", + "images/task_00002_129_161_camera_top.jpg", + "images/task_00002_129_164_camera_top.jpg", + "images/task_00002_129_167_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00002", + "split": "eval", + "split_group_id": "task_00002__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. 
Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_00002_129_1882_camera_top.jpg", + "images/task_00002_129_1949_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00002", + "split": "eval", + "split_group_id": "task_00002__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00002_129_32_camera_top.jpg", + "images/task_00002_129_35_camera_top.jpg", + "images/task_00002_129_38_camera_top.jpg", + "images/task_00002_129_41_camera_top.jpg", + "images/task_00002_129_44_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00002", + "split": "eval", + "split_group_id": "task_00002__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00002_129_116_camera_top.jpg", + "images/task_00002_129_119_camera_top.jpg", + "images/task_00002_129_122_camera_top.jpg", + "images/task_00002_129_125_camera_top.jpg", + "images/task_00002_129_128_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00002", + "split": "eval", + "split_group_id": "task_00002__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. both arms are active" + } + ], + "images": [ + "images/task_00002_129_227_camera_top.jpg", + "images/task_00002_129_230_camera_top.jpg", + "images/task_00002_129_233_camera_top.jpg", + "images/task_00002_129_236_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00002", + "split": "eval", + "split_group_id": "task_00002__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nC. toward the left side of the scene\nA. toward the right side of the scene\nB. toward the top of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
toward the left side of the scene" + } + ], + "images": [ + "images/task_00002_129_1463_camera_top.jpg", + "images/task_00002_129_1468_camera_top.jpg", + "images/task_00002_129_1473_camera_top.jpg", + "images/task_00002_129_1478_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00002", + "split": "eval", + "split_group_id": "task_00002__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_00002_129_1595_camera_top.jpg", + "images/task_00002_129_1893_camera_top.jpg", + "images/task_00002_129_1752_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00002", + "split": "eval", + "split_group_id": "task_00002__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. approach\nC. hold and carry\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. hold and carry" + } + ], + "images": [ + "images/task_00002_129_1542_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00002", + "split": "eval", + "split_group_id": "task_00002__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. approach\nC. 
release\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. approach" + } + ], + "images": [ + "images/task_00003_43_211_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00003", + "split": "eval", + "split_group_id": "task_00003__episode_000043", + "recording_id": null, + "episode_id": 43, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. transfer\nC. release\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. approach" + } + ], + "images": [ + "images/task_00003_43_197_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00003", + "split": "eval", + "split_group_id": "task_00003__episode_000043", + "recording_id": null, + "episode_id": 43, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_00003_43_694_camera_top.jpg", + "images/task_00003_43_1205_camera_top.jpg", + "images/task_00003_43_141_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00003", + "split": "eval", + "split_group_id": "task_00003__episode_000043", + "recording_id": null, + "episode_id": 43, + "camera": "camera_top", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. 
The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_00003_43_187_camera_top.jpg", + "images/task_00003_43_265_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00003", + "split": "eval", + "split_group_id": "task_00003__episode_000043", + "recording_id": null, + "episode_id": 43, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_00003_43_188_camera_top.jpg", + "images/task_00003_43_257_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00003", + "split": "eval", + "split_group_id": "task_00003__episode_000043", + "recording_id": null, + "episode_id": 43, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_00003_43_202_camera_top.jpg", + "images/task_00003_43_1210_camera_top.jpg", + "images/task_00003_43_485_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00003", + "split": "eval", + "split_group_id": "task_00003__episode_000043", + "recording_id": null, + "episode_id": 43, + "camera": "camera_top", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00003_43_140_camera_top_t-6.jpg", + "images/task_00003_43_143_camera_top_t-3.jpg", + "images/task_00003_43_146_camera_top_t0.jpg", + "images/task_00003_43_149_camera_top_t+3.jpg", + "images/task_00003_43_152_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00003", + "split": "eval", + "split_group_id": "task_00003__episode_000043", + "recording_id": null, + "episode_id": 43, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. approach\nC. hold and carry\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
pre-approach" + } + ], + "images": [ + "images/task_00003_80_188_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00003", + "split": "eval", + "split_group_id": "task_00003__episode_000080", + "recording_id": null, + "episode_id": 80, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. release\nC. transfer\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. pre-approach" + } + ], + "images": [ + "images/task_00003_113_260_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00003", + "split": "eval", + "split_group_id": "task_00003__episode_000113", + "recording_id": null, + "episode_id": 113, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. release\nC. hold and carry\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. pre-approach" + } + ], + "images": [ + "images/task_00003_142_82_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00003", + "split": "eval", + "split_group_id": "task_00003__episode_000142", + "recording_id": null, + "episode_id": 142, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. contact\nC. release\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
pre-approach" + } + ], + "images": [ + "images/task_00003_153_100_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00003", + "split": "eval", + "split_group_id": "task_00003__episode_000153", + "recording_id": null, + "episode_id": 153, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. transfer\nC. approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. approach" + } + ], + "images": [ + "images/task_00003_168_45_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00003", + "split": "eval", + "split_group_id": "task_00003__episode_000168", + "recording_id": null, + "episode_id": 168, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. contact\nC. pre-approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. pre-approach" + } + ], + "images": [ + "images/task_00003_171_1117_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00003", + "split": "eval", + "split_group_id": "task_00003__episode_000171", + "recording_id": null, + "episode_id": 171, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00006_45_1050_camera_top_t-6.jpg", + "images/task_00006_45_1053_camera_top_t-3.jpg", + "images/task_00006_45_1056_camera_top_t0.jpg", + "images/task_00006_45_1059_camera_top_t+3.jpg", + "images/task_00006_45_1062_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00006", + "split": "eval", + "split_group_id": "task_00006__episode_000045", + "recording_id": null, + "episode_id": 45, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. hold and carry\nC. contact\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. contact" + } + ], + "images": [ + "images/task_00006_45_721_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00006", + "split": "eval", + "split_group_id": "task_00006__episode_000045", + "recording_id": null, + "episode_id": 45, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_00006_45_331_camera_top.jpg", + "images/task_00006_45_1287_camera_top.jpg", + "images/task_00006_45_1126_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00006", + "split": "eval", + "split_group_id": "task_00006__episode_000045", + "recording_id": null, + "episode_id": 45, + "camera": "camera_top", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_00006_45_1299_camera_top.jpg", + "images/task_00006_45_1128_camera_top.jpg", + "images/task_00006_45_1087_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00006", + "split": "eval", + "split_group_id": "task_00006__episode_000045", + "recording_id": null, + "episode_id": 45, + "camera": "camera_top", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. contact\nC. approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. contact" + } + ], + "images": [ + "images/task_00006_45_344_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00006", + "split": "eval", + "split_group_id": "task_00006__episode_000045", + "recording_id": null, + "episode_id": 45, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_00006_45_844_camera_top.jpg", + "images/task_00006_45_1168_camera_top.jpg", + "images/task_00006_45_1413_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00006", + "split": "eval", + "split_group_id": "task_00006__episode_000045", + "recording_id": null, + "episode_id": 45, + "camera": "camera_top", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00006_45_1169_camera_top.jpg", + "images/task_00006_45_1172_camera_top.jpg", + "images/task_00006_45_1175_camera_top.jpg", + "images/task_00006_45_1178_camera_top.jpg", + "images/task_00006_45_1181_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00006", + "split": "eval", + "split_group_id": "task_00006__episode_000045", + "recording_id": null, + "episode_id": 45, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
both arms are idle" + } + ], + "images": [ + "images/task_00006_45_557_camera_top.jpg", + "images/task_00006_45_560_camera_top.jpg", + "images/task_00006_45_563_camera_top.jpg", + "images/task_00006_45_566_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00006", + "split": "eval", + "split_group_id": "task_00006__episode_000045", + "recording_id": null, + "episode_id": 45, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. hold and carry\nC. pre-approach\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. pre-approach" + } + ], + "images": [ + "images/task_00006_45_297_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00006", + "split": "eval", + "split_group_id": "task_00006__episode_000045", + "recording_id": null, + "episode_id": 45, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the left side of the scene\nA. toward the bottom of the scene\nB. toward the top of the scene\nC. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
toward the left side of the scene" + } + ], + "images": [ + "images/task_00006_56_910_camera_top.jpg", + "images/task_00006_56_915_camera_top.jpg", + "images/task_00006_56_920_camera_top.jpg", + "images/task_00006_56_925_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00006", + "split": "eval", + "split_group_id": "task_00006__episode_000056", + "recording_id": null, + "episode_id": 56, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00006_88_987_camera_top_t-6.jpg", + "images/task_00006_88_990_camera_top_t-3.jpg", + "images/task_00006_88_993_camera_top_t0.jpg", + "images/task_00006_88_996_camera_top_t+3.jpg", + "images/task_00006_88_999_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00006", + "split": "eval", + "split_group_id": "task_00006__episode_000088", + "recording_id": null, + "episode_id": 88, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_00006_88_837_camera_top.jpg", + "images/task_00006_88_462_camera_top.jpg", + "images/task_00006_88_985_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00006", + "split": "eval", + "split_group_id": "task_00006__episode_000088", + "recording_id": null, + "episode_id": 88, + "camera": "camera_top", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_00006_88_483_camera_top.jpg", + "images/task_00006_88_261_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00006", + "split": "eval", + "split_group_id": "task_00006__episode_000088", + "recording_id": null, + "episode_id": 88, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00006_88_963_camera_top.jpg", + "images/task_00006_88_966_camera_top.jpg", + "images/task_00006_88_969_camera_top.jpg", + "images/task_00006_88_972_camera_top.jpg", + "images/task_00006_88_975_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00006", + "split": "eval", + "split_group_id": "task_00006__episode_000088", + "recording_id": null, + "episode_id": 88, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_00006_88_750_camera_top.jpg", + "images/task_00006_88_869_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00006", + "split": "eval", + "split_group_id": "task_00006__episode_000088", + "recording_id": null, + "episode_id": 88, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
both arms are active" + } + ], + "images": [ + "images/task_00006_88_1289_camera_top.jpg", + "images/task_00006_88_1292_camera_top.jpg", + "images/task_00006_88_1295_camera_top.jpg", + "images/task_00006_88_1298_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00006", + "split": "eval", + "split_group_id": "task_00006__episode_000088", + "recording_id": null, + "episode_id": 88, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_00006_88_907_camera_top.jpg", + "images/task_00006_88_529_camera_top.jpg", + "images/task_00006_88_952_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00006", + "split": "eval", + "split_group_id": "task_00006__episode_000088", + "recording_id": null, + "episode_id": 88, + "camera": "camera_top", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. release\nC. approach\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. release" + } + ], + "images": [ + "images/task_00006_88_1392_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00006", + "split": "eval", + "split_group_id": "task_00006__episode_000088", + "recording_id": null, + "episode_id": 88, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. transfer\nC. pre-approach\nD. 
release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. pre-approach" + } + ], + "images": [ + "images/task_00006_88_228_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00006", + "split": "eval", + "split_group_id": "task_00006__episode_000088", + "recording_id": null, + "episode_id": 88, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_00006_88_442_camera_top.jpg", + "images/task_00006_88_716_camera_top.jpg", + "images/task_00006_88_1076_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00006", + "split": "eval", + "split_group_id": "task_00006__episode_000088", + "recording_id": null, + "episode_id": 88, + "camera": "camera_top", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00006_115_334_camera_top.jpg", + "images/task_00006_115_337_camera_top.jpg", + "images/task_00006_115_340_camera_top.jpg", + "images/task_00006_115_343_camera_top.jpg", + "images/task_00006_115_346_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00006", + "split": "eval", + "split_group_id": "task_00006__episode_000115", + "recording_id": null, + "episode_id": 115, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. release\nC. pre-approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. pre-approach" + } + ], + "images": [ + "images/task_00007_5_183_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000005", + "recording_id": null, + "episode_id": 5, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. transfer\nC. approach\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
pre-approach" + } + ], + "images": [ + "images/task_00007_7_307_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000007", + "recording_id": null, + "episode_id": 7, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. pre-approach\nC. contact\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. pre-approach" + } + ], + "images": [ + "images/task_00007_10_725_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000010", + "recording_id": null, + "episode_id": 10, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00007_15_406_camera_top.jpg", + "images/task_00007_15_409_camera_top.jpg", + "images/task_00007_15_412_camera_top.jpg", + "images/task_00007_15_415_camera_top.jpg", + "images/task_00007_15_418_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000015", + "recording_id": null, + "episode_id": 15, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. hold and carry\nC. pre-approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. release" + } + ], + "images": [ + "images/task_00007_15_602_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000015", + "recording_id": null, + "episode_id": 15, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the left gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — contact established" + } + ], + "images": [ + "images/task_00007_15_251_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000015", + "recording_id": null, + "episode_id": 15, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_00007_15_234_camera_top.jpg", + "images/task_00007_15_292_camera_top.jpg", + "images/task_00007_15_131_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000015", + "recording_id": null, + "episode_id": 15, + "camera": "camera_top", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. pre-approach\nC. release\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. hold and carry" + } + ], + "images": [ + "images/task_00007_15_380_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000015", + "recording_id": null, + "episode_id": 15, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_00007_15_235_camera_top.jpg", + "images/task_00007_15_302_camera_top.jpg", + "images/task_00007_15_148_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000015", + "recording_id": null, + "episode_id": 15, + "camera": "camera_top", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_00007_15_413_camera_top.jpg", + "images/task_00007_15_550_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000015", + "recording_id": null, + "episode_id": 15, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00007_15_247_camera_top.jpg", + "images/task_00007_15_250_camera_top.jpg", + "images/task_00007_15_253_camera_top.jpg", + "images/task_00007_15_256_camera_top.jpg", + "images/task_00007_15_259_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000015", + "recording_id": null, + "episode_id": 15, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nB. toward the top of the scene\nA. toward the bottom of the scene\nC. toward the left side of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. toward the top of the scene" + } + ], + "images": [ + "images/task_00007_15_387_camera_top.jpg", + "images/task_00007_15_392_camera_top.jpg", + "images/task_00007_15_397_camera_top.jpg", + "images/task_00007_15_402_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000015", + "recording_id": null, + "episode_id": 15, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nD. toward the top of the scene\nA. toward the bottom of the scene\nB. toward the left side of the scene\nC. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
toward the top of the scene" + } + ], + "images": [ + "images/task_00007_15_474_camera_top.jpg", + "images/task_00007_15_479_camera_top.jpg", + "images/task_00007_15_484_camera_top.jpg", + "images/task_00007_15_489_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000015", + "recording_id": null, + "episode_id": 15, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_00007_15_100_camera_top.jpg", + "images/task_00007_15_210_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000015", + "recording_id": null, + "episode_id": 15, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. contact\nC. release\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
contact" + } + ], + "images": [ + "images/task_00007_15_370_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000015", + "recording_id": null, + "episode_id": 15, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00007_15_238_camera_top.jpg", + "images/task_00007_15_241_camera_top.jpg", + "images/task_00007_15_244_camera_top.jpg", + "images/task_00007_15_247_camera_top.jpg", + "images/task_00007_15_250_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000015", + "recording_id": null, + "episode_id": 15, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nC. toward the top of the scene\nA. toward the right side of the scene\nB. toward the bottom of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
toward the top of the scene" + } + ], + "images": [ + "images/task_00007_18_484_camera_top.jpg", + "images/task_00007_18_489_camera_top.jpg", + "images/task_00007_18_494_camera_top.jpg", + "images/task_00007_18_499_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000018", + "recording_id": null, + "episode_id": 18, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00007_18_246_camera_top.jpg", + "images/task_00007_18_249_camera_top.jpg", + "images/task_00007_18_252_camera_top.jpg", + "images/task_00007_18_255_camera_top.jpg", + "images/task_00007_18_258_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000018", + "recording_id": null, + "episode_id": 18, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the left gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — contact established" + } + ], + "images": [ + "images/task_00007_18_101_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000018", + "recording_id": null, + "episode_id": 18, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00007_18_478_camera_top_t-6.jpg", + "images/task_00007_18_481_camera_top_t-3.jpg", + "images/task_00007_18_484_camera_top_t0.jpg", + "images/task_00007_18_487_camera_top_t+3.jpg", + "images/task_00007_18_490_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000018", + "recording_id": null, + "episode_id": 18, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the left gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — contact established" + } + ], + "images": [ + "images/task_00007_18_130_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000018", + "recording_id": null, + "episode_id": 18, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. pre-approach\nC. release\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. hold and carry" + } + ], + "images": [ + "images/task_00007_18_146_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000018", + "recording_id": null, + "episode_id": 18, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the left side of the scene\nB. toward the top of the scene\nC. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00007_18_143_camera_top.jpg", + "images/task_00007_18_148_camera_top.jpg", + "images/task_00007_18_153_camera_top.jpg", + "images/task_00007_18_158_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000018", + "recording_id": null, + "episode_id": 18, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00007_18_412_camera_top.jpg", + "images/task_00007_18_415_camera_top.jpg", + "images/task_00007_18_418_camera_top.jpg", + "images/task_00007_18_421_camera_top.jpg", + "images/task_00007_18_424_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000018", + "recording_id": null, + "episode_id": 18, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_00007_18_420_camera_top.jpg", + "images/task_00007_18_529_camera_top.jpg", + "images/task_00007_18_344_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000018", + "recording_id": null, + "episode_id": 18, + "camera": "camera_top", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the left gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_00007_18_572_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000018", + "recording_id": null, + "episode_id": 18, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/task_00007_18_223_camera_top.jpg", + "images/task_00007_18_93_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000018", + "recording_id": null, + "episode_id": 18, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nA. toward the right side of the scene\nB. toward the top of the scene\nC. toward the bottom of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. toward the right side of the scene" + } + ], + "images": [ + "images/task_00007_18_148_camera_top.jpg", + "images/task_00007_18_153_camera_top.jpg", + "images/task_00007_18_158_camera_top.jpg", + "images/task_00007_18_163_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000018", + "recording_id": null, + "episode_id": 18, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/task_00007_28_107_camera_top.jpg", + "images/task_00007_28_334_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000028", + "recording_id": null, + "episode_id": 28, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00007_28_399_camera_top.jpg", + "images/task_00007_28_402_camera_top.jpg", + "images/task_00007_28_405_camera_top.jpg", + "images/task_00007_28_408_camera_top.jpg", + "images/task_00007_28_411_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000028", + "recording_id": null, + "episode_id": 28, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_00007_28_110_camera_top.jpg", + "images/task_00007_28_363_camera_top.jpg", + "images/task_00007_28_422_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000028", + "recording_id": null, + "episode_id": 28, + "camera": "camera_top", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nA. toward the top of the scene\nB. toward the bottom of the scene\nC. toward the left side of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. toward the top of the scene" + } + ], + "images": [ + "images/task_00007_28_507_camera_top.jpg", + "images/task_00007_28_512_camera_top.jpg", + "images/task_00007_28_517_camera_top.jpg", + "images/task_00007_28_522_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000028", + "recording_id": null, + "episode_id": 28, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/task_00007_28_169_camera_top.jpg", + "images/task_00007_28_105_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000028", + "recording_id": null, + "episode_id": 28, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00007_28_176_camera_top.jpg", + "images/task_00007_28_179_camera_top.jpg", + "images/task_00007_28_182_camera_top.jpg", + "images/task_00007_28_185_camera_top.jpg", + "images/task_00007_28_188_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000028", + "recording_id": null, + "episode_id": 28, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00007_28_361_camera_top_t-6.jpg", + "images/task_00007_28_364_camera_top_t-3.jpg", + "images/task_00007_28_367_camera_top_t0.jpg", + "images/task_00007_28_370_camera_top_t+3.jpg", + "images/task_00007_28_373_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000028", + "recording_id": null, + "episode_id": 28, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. release\nC. contact\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. pre-approach" + } + ], + "images": [ + "images/task_00007_39_33_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000039", + "recording_id": null, + "episode_id": 39, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_00007_63_292_camera_top.jpg", + "images/task_00007_63_523_camera_top.jpg", + "images/task_00007_63_419_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000063", + "recording_id": null, + "episode_id": 63, + "camera": "camera_top", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_00007_63_198_camera_top.jpg", + "images/task_00007_63_468_camera_top.jpg", + "images/task_00007_63_795_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000063", + "recording_id": null, + "episode_id": 63, + "camera": "camera_top", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nC. toward the top of the scene\nA. toward the bottom of the scene\nB. toward the left side of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
toward the top of the scene" + } + ], + "images": [ + "images/task_00007_63_903_camera_top.jpg", + "images/task_00007_63_908_camera_top.jpg", + "images/task_00007_63_913_camera_top.jpg", + "images/task_00007_63_918_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000063", + "recording_id": null, + "episode_id": 63, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00007_63_570_camera_top_t-6.jpg", + "images/task_00007_63_573_camera_top_t-3.jpg", + "images/task_00007_63_576_camera_top_t0.jpg", + "images/task_00007_63_579_camera_top_t+3.jpg", + "images/task_00007_63_582_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000063", + "recording_id": null, + "episode_id": 63, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00007_63_697_camera_top.jpg", + "images/task_00007_63_700_camera_top.jpg", + "images/task_00007_63_703_camera_top.jpg", + "images/task_00007_63_706_camera_top.jpg", + "images/task_00007_63_709_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000063", + "recording_id": null, + "episode_id": 63, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00007_63_1006_camera_top_t-6.jpg", + "images/task_00007_63_1009_camera_top_t-3.jpg", + "images/task_00007_63_1012_camera_top_t0.jpg", + "images/task_00007_63_1015_camera_top_t+3.jpg", + "images/task_00007_63_1018_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000063", + "recording_id": null, + "episode_id": 63, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_00007_63_911_camera_top.jpg", + "images/task_00007_63_793_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000063", + "recording_id": null, + "episode_id": 63, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00007_63_715_camera_top.jpg", + "images/task_00007_63_718_camera_top.jpg", + "images/task_00007_63_721_camera_top.jpg", + "images/task_00007_63_724_camera_top.jpg", + "images/task_00007_63_727_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000063", + "recording_id": null, + "episode_id": 63, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nA. toward the bottom of the scene\nB. toward the left side of the scene\nC. toward the right side of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
toward the bottom of the scene" + } + ], + "images": [ + "images/task_00007_65_458_camera_top.jpg", + "images/task_00007_65_463_camera_top.jpg", + "images/task_00007_65_468_camera_top.jpg", + "images/task_00007_65_473_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_00007_65_525_camera_top.jpg", + "images/task_00007_65_166_camera_top.jpg", + "images/task_00007_65_234_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00007_65_592_camera_top.jpg", + "images/task_00007_65_595_camera_top.jpg", + "images/task_00007_65_598_camera_top.jpg", + "images/task_00007_65_601_camera_top.jpg", + "images/task_00007_65_604_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00007_65_283_camera_top.jpg", + "images/task_00007_65_286_camera_top.jpg", + "images/task_00007_65_289_camera_top.jpg", + "images/task_00007_65_292_camera_top.jpg", + "images/task_00007_65_295_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_00007_65_554_camera_top.jpg", + "images/task_00007_65_452_camera_top.jpg", + "images/task_00007_65_167_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00007_65_385_camera_top.jpg", + "images/task_00007_65_388_camera_top.jpg", + "images/task_00007_65_391_camera_top.jpg", + "images/task_00007_65_394_camera_top.jpg", + "images/task_00007_65_397_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/task_00007_65_466_camera_top.jpg", + "images/task_00007_65_610_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. release\nC. pre-approach\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. pre-approach" + } + ], + "images": [ + "images/task_00007_65_140_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nB. toward the right side of the scene\nA. toward the left side of the scene\nC. toward the top of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00007_65_428_camera_top.jpg", + "images/task_00007_65_433_camera_top.jpg", + "images/task_00007_65_438_camera_top.jpg", + "images/task_00007_65_443_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nB. toward the left side of the scene\nA. toward the right side of the scene\nC. toward the top of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. toward the left side of the scene" + } + ], + "images": [ + "images/task_00007_65_712_camera_top.jpg", + "images/task_00007_65_717_camera_top.jpg", + "images/task_00007_65_722_camera_top.jpg", + "images/task_00007_65_727_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/task_00007_65_601_camera_top.jpg", + "images/task_00007_65_381_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. contact\nC. hold and carry\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. release" + } + ], + "images": [ + "images/task_00007_65_806_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nD. toward the bottom of the scene\nA. toward the left side of the scene\nB. toward the right side of the scene\nC. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
toward the bottom of the scene" + } + ], + "images": [ + "images/task_00007_65_455_camera_top.jpg", + "images/task_00007_65_460_camera_top.jpg", + "images/task_00007_65_465_camera_top.jpg", + "images/task_00007_65_470_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. pre-approach\nC. release\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. pre-approach" + } + ], + "images": [ + "images/task_00007_71_164_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000071", + "recording_id": null, + "episode_id": 71, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. release\nC. pre-approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. pre-approach" + } + ], + "images": [ + "images/task_00007_73_516_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000073", + "recording_id": null, + "episode_id": 73, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. pre-approach\nC. contact\nD. 
release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. pre-approach" + } + ], + "images": [ + "images/task_00007_76_269_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000076", + "recording_id": null, + "episode_id": 76, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. pre-approach\nC. contact\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. pre-approach" + } + ], + "images": [ + "images/task_00007_78_238_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000078", + "recording_id": null, + "episode_id": 78, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. contact\nC. release\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. pre-approach" + } + ], + "images": [ + "images/task_00007_105_650_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000105", + "recording_id": null, + "episode_id": 105, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. contact\nC. pre-approach\nD. transfer\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "C. pre-approach" + } + ], + "images": [ + "images/task_00007_115_154_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000115", + "recording_id": null, + "episode_id": 115, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. approach\nC. contact\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. hold and carry" + } + ], + "images": [ + "images/task_00007_123_330_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000123", + "recording_id": null, + "episode_id": 123, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00007_123_342_camera_top.jpg", + "images/task_00007_123_345_camera_top.jpg", + "images/task_00007_123_348_camera_top.jpg", + "images/task_00007_123_351_camera_top.jpg", + "images/task_00007_123_354_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000123", + "recording_id": null, + "episode_id": 123, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nA. toward the bottom of the scene\nB. toward the top of the scene\nC. toward the right side of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. toward the bottom of the scene" + } + ], + "images": [ + "images/task_00007_123_256_camera_top.jpg", + "images/task_00007_123_261_camera_top.jpg", + "images/task_00007_123_266_camera_top.jpg", + "images/task_00007_123_271_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000123", + "recording_id": null, + "episode_id": 123, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. release\nC. approach\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
approach" + } + ], + "images": [ + "images/task_00007_123_247_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000123", + "recording_id": null, + "episode_id": 123, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_00007_123_228_camera_top.jpg", + "images/task_00007_123_160_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000123", + "recording_id": null, + "episode_id": 123, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00007_123_192_camera_top.jpg", + "images/task_00007_123_195_camera_top.jpg", + "images/task_00007_123_198_camera_top.jpg", + "images/task_00007_123_201_camera_top.jpg", + "images/task_00007_123_204_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000123", + "recording_id": null, + "episode_id": 123, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. pre-approach\nC. release\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. approach" + } + ], + "images": [ + "images/task_00007_123_223_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000123", + "recording_id": null, + "episode_id": 123, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00007_123_229_camera_top_t-6.jpg", + "images/task_00007_123_232_camera_top_t-3.jpg", + "images/task_00007_123_235_camera_top_t0.jpg", + "images/task_00007_123_238_camera_top_t+3.jpg", + "images/task_00007_123_241_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000123", + "recording_id": null, + "episode_id": 123, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_00007_123_491_camera_top.jpg", + "images/task_00007_123_282_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000123", + "recording_id": null, + "episode_id": 123, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00007_123_409_camera_top.jpg", + "images/task_00007_123_412_camera_top.jpg", + "images/task_00007_123_415_camera_top.jpg", + "images/task_00007_123_418_camera_top.jpg", + "images/task_00007_123_421_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00007", + "split": "eval", + "split_group_id": "task_00007__episode_000123", + "recording_id": null, + "episode_id": 123, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_00008_13_116_camera_top.jpg", + "images/task_00008_13_601_camera_top.jpg", + "images/task_00008_13_896_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00008", + "split": "eval", + "split_group_id": "task_00008__episode_000013", + "recording_id": null, + "episode_id": 13, + "camera": "camera_top", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nC. toward the right side of the scene\nA. toward the bottom of the scene\nB. toward the top of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00008_16_230_camera_top.jpg", + "images/task_00008_16_235_camera_top.jpg", + "images/task_00008_16_240_camera_top.jpg", + "images/task_00008_16_245_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00008", + "split": "eval", + "split_group_id": "task_00008__episode_000016", + "recording_id": null, + "episode_id": 16, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00008_42_135_camera_top.jpg", + "images/task_00008_42_138_camera_top.jpg", + "images/task_00008_42_141_camera_top.jpg", + "images/task_00008_42_144_camera_top.jpg", + "images/task_00008_42_147_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00008", + "split": "eval", + "split_group_id": "task_00008__episode_000042", + "recording_id": null, + "episode_id": 42, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the left side of the scene\nA. toward the right side of the scene\nC. toward the bottom of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
toward the left side of the scene" + } + ], + "images": [ + "images/task_00008_53_130_camera_top.jpg", + "images/task_00008_53_135_camera_top.jpg", + "images/task_00008_53_140_camera_top.jpg", + "images/task_00008_53_145_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00008", + "split": "eval", + "split_group_id": "task_00008__episode_000053", + "recording_id": null, + "episode_id": 53, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00008_106_367_camera_top.jpg", + "images/task_00008_106_370_camera_top.jpg", + "images/task_00008_106_373_camera_top.jpg", + "images/task_00008_106_376_camera_top.jpg", + "images/task_00008_106_379_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00008", + "split": "eval", + "split_group_id": "task_00008__episode_000106", + "recording_id": null, + "episode_id": 106, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00008_117_305_camera_top.jpg", + "images/task_00008_117_308_camera_top.jpg", + "images/task_00008_117_311_camera_top.jpg", + "images/task_00008_117_314_camera_top.jpg", + "images/task_00008_117_317_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00008", + "split": "eval", + "split_group_id": "task_00008__episode_000117", + "recording_id": null, + "episode_id": 117, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. hold and carry\nC. contact\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. release" + } + ], + "images": [ + "images/task_00008_120_1022_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00008", + "split": "eval", + "split_group_id": "task_00008__episode_000120", + "recording_id": null, + "episode_id": 120, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. pre-approach\nC. approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
approach" + } + ], + "images": [ + "images/task_00009_2_99_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00009", + "split": "eval", + "split_group_id": "task_00009__episode_000002", + "recording_id": null, + "episode_id": 2, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nC. toward the right side of the scene\nA. toward the top of the scene\nB. toward the bottom of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. toward the right side of the scene" + } + ], + "images": [ + "images/task_00009_5_282_camera_top.jpg", + "images/task_00009_5_287_camera_top.jpg", + "images/task_00009_5_292_camera_top.jpg", + "images/task_00009_5_297_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00009", + "split": "eval", + "split_group_id": "task_00009__episode_000005", + "recording_id": null, + "episode_id": 5, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. pre-approach\nC. approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
approach" + } + ], + "images": [ + "images/task_00009_10_111_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00009", + "split": "eval", + "split_group_id": "task_00009__episode_000010", + "recording_id": null, + "episode_id": 10, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nC. toward the right side of the scene\nA. toward the left side of the scene\nB. toward the bottom of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. toward the right side of the scene" + } + ], + "images": [ + "images/task_00009_18_218_camera_top.jpg", + "images/task_00009_18_223_camera_top.jpg", + "images/task_00009_18_228_camera_top.jpg", + "images/task_00009_18_233_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00009", + "split": "eval", + "split_group_id": "task_00009__episode_000018", + "recording_id": null, + "episode_id": 18, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nC. toward the right side of the scene\nA. toward the top of the scene\nB. toward the bottom of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00009_34_100_camera_top.jpg", + "images/task_00009_34_105_camera_top.jpg", + "images/task_00009_34_110_camera_top.jpg", + "images/task_00009_34_115_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00009", + "split": "eval", + "split_group_id": "task_00009__episode_000034", + "recording_id": null, + "episode_id": 34, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. release\nC. approach\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. release" + } + ], + "images": [ + "images/task_00009_47_500_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00009", + "split": "eval", + "split_group_id": "task_00009__episode_000047", + "recording_id": null, + "episode_id": 47, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. pre-approach\nC. release\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. approach" + } + ], + "images": [ + "images/task_00009_52_104_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00009", + "split": "eval", + "split_group_id": "task_00009__episode_000052", + "recording_id": null, + "episode_id": 52, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. approach\nC. pre-approach\nD. 
transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. transfer" + } + ], + "images": [ + "images/task_00009_52_389_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00009", + "split": "eval", + "split_group_id": "task_00009__episode_000052", + "recording_id": null, + "episode_id": 52, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. pre-approach\nC. release\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. transfer" + } + ], + "images": [ + "images/task_00009_52_384_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00009", + "split": "eval", + "split_group_id": "task_00009__episode_000052", + "recording_id": null, + "episode_id": 52, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. approach\nC. transfer\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. pre-approach" + } + ], + "images": [ + "images/task_00009_60_20_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00009", + "split": "eval", + "split_group_id": "task_00009__episode_000060", + "recording_id": null, + "episode_id": 60, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. pre-approach\nC. transfer\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
release" + } + ], + "images": [ + "images/task_00009_65_460_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00009", + "split": "eval", + "split_group_id": "task_00009__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. release\nC. pre-approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. release" + } + ], + "images": [ + "images/task_00009_68_351_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00009", + "split": "eval", + "split_group_id": "task_00009__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. pre-approach\nC. release\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. hold and carry" + } + ], + "images": [ + "images/task_00010_5_1019_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00010", + "split": "eval", + "split_group_id": "task_00010__episode_000005", + "recording_id": null, + "episode_id": 5, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_00010_23_503_camera_top.jpg", + "images/task_00010_23_933_camera_top.jpg", + "images/task_00010_23_173_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00010", + "split": "eval", + "split_group_id": "task_00010__episode_000023", + "recording_id": null, + "episode_id": 23, + "camera": "camera_top", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nC. toward the left side of the scene\nA. toward the top of the scene\nB. toward the right side of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. toward the left side of the scene" + } + ], + "images": [ + "images/task_00010_23_389_camera_top.jpg", + "images/task_00010_23_394_camera_top.jpg", + "images/task_00010_23_399_camera_top.jpg", + "images/task_00010_23_404_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00010", + "split": "eval", + "split_group_id": "task_00010__episode_000023", + "recording_id": null, + "episode_id": 23, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00010_23_364_camera_top_t-6.jpg", + "images/task_00010_23_367_camera_top_t-3.jpg", + "images/task_00010_23_370_camera_top_t0.jpg", + "images/task_00010_23_373_camera_top_t+3.jpg", + "images/task_00010_23_376_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00010", + "split": "eval", + "split_group_id": "task_00010__episode_000023", + "recording_id": null, + "episode_id": 23, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00010_23_261_camera_top_t-6.jpg", + "images/task_00010_23_264_camera_top_t-3.jpg", + "images/task_00010_23_267_camera_top_t0.jpg", + "images/task_00010_23_270_camera_top_t+3.jpg", + "images/task_00010_23_273_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00010", + "split": "eval", + "split_group_id": "task_00010__episode_000023", + "recording_id": null, + "episode_id": 23, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_00010_23_356_camera_top.jpg", + "images/task_00010_23_554_camera_top.jpg", + "images/task_00010_23_163_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00010", + "split": "eval", + "split_group_id": "task_00010__episode_000023", + "recording_id": null, + "episode_id": 23, + "camera": "camera_top", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. approach\nC. release\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. release" + } + ], + "images": [ + "images/task_00010_23_994_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00010", + "split": "eval", + "split_group_id": "task_00010__episode_000023", + "recording_id": null, + "episode_id": 23, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00010_23_816_camera_top.jpg", + "images/task_00010_23_819_camera_top.jpg", + "images/task_00010_23_822_camera_top.jpg", + "images/task_00010_23_825_camera_top.jpg", + "images/task_00010_23_828_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00010", + "split": "eval", + "split_group_id": "task_00010__episode_000023", + "recording_id": null, + "episode_id": 23, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_00010_37_381_camera_top.jpg", + "images/task_00010_37_139_camera_top.jpg", + "images/task_00010_37_569_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00010", + "split": "eval", + "split_group_id": "task_00010__episode_000037", + "recording_id": null, + "episode_id": 37, + "camera": "camera_top", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00010_42_1054_camera_top_t-6.jpg", + "images/task_00010_42_1057_camera_top_t-3.jpg", + "images/task_00010_42_1060_camera_top_t0.jpg", + "images/task_00010_42_1063_camera_top_t+3.jpg", + "images/task_00010_42_1066_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00010", + "split": "eval", + "split_group_id": "task_00010__episode_000042", + "recording_id": null, + "episode_id": 42, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00010_42_665_camera_top.jpg", + "images/task_00010_42_668_camera_top.jpg", + "images/task_00010_42_671_camera_top.jpg", + "images/task_00010_42_674_camera_top.jpg", + "images/task_00010_42_677_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00010", + "split": "eval", + "split_group_id": "task_00010__episode_000042", + "recording_id": null, + "episode_id": 42, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — contact established" + } + ], + "images": [ + "images/task_00010_42_408_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00010", + "split": "eval", + "split_group_id": "task_00010__episode_000042", + "recording_id": null, + "episode_id": 42, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/task_00010_42_433_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00010", + "split": "eval", + "split_group_id": "task_00010__episode_000042", + "recording_id": null, + "episode_id": 42, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. pre-approach\nC. contact\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. contact" + } + ], + "images": [ + "images/task_00010_42_141_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00010", + "split": "eval", + "split_group_id": "task_00010__episode_000042", + "recording_id": null, + "episode_id": 42, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_00010_42_926_camera_top.jpg", + "images/task_00010_42_727_camera_top.jpg", + "images/task_00010_42_354_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00010", + "split": "eval", + "split_group_id": "task_00010__episode_000042", + "recording_id": null, + "episode_id": 42, + "camera": "camera_top", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/task_00010_42_587_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00010", + "split": "eval", + "split_group_id": "task_00010__episode_000042", + "recording_id": null, + "episode_id": 42, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nC. toward the right side of the scene\nA. toward the left side of the scene\nB. toward the bottom of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00010_42_1010_camera_top.jpg", + "images/task_00010_42_1015_camera_top.jpg", + "images/task_00010_42_1020_camera_top.jpg", + "images/task_00010_42_1025_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00010", + "split": "eval", + "split_group_id": "task_00010__episode_000042", + "recording_id": null, + "episode_id": 42, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00010_42_608_camera_top.jpg", + "images/task_00010_42_611_camera_top.jpg", + "images/task_00010_42_614_camera_top.jpg", + "images/task_00010_42_617_camera_top.jpg", + "images/task_00010_42_620_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00010", + "split": "eval", + "split_group_id": "task_00010__episode_000042", + "recording_id": null, + "episode_id": 42, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_00010_42_855_camera_top.jpg", + "images/task_00010_42_349_camera_top.jpg", + "images/task_00010_42_1015_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00010", + "split": "eval", + "split_group_id": "task_00010__episode_000042", + "recording_id": null, + "episode_id": 42, + "camera": "camera_top", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. transfer\nC. approach\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. transfer" + } + ], + "images": [ + "images/task_00010_47_1026_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00010", + "split": "eval", + "split_group_id": "task_00010__episode_000047", + "recording_id": null, + "episode_id": 47, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00010_65_292_camera_top.jpg", + "images/task_00010_65_295_camera_top.jpg", + "images/task_00010_65_298_camera_top.jpg", + "images/task_00010_65_301_camera_top.jpg", + "images/task_00010_65_304_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00010", + "split": "eval", + "split_group_id": "task_00010__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_00010_66_505_camera_top.jpg", + "images/task_00010_66_543_camera_top.jpg", + "images/task_00010_66_137_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00010", + "split": "eval", + "split_group_id": "task_00010__episode_000066", + "recording_id": null, + "episode_id": 66, + "camera": "camera_top", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nC. toward the left side of the scene\nA. toward the top of the scene\nB. toward the right side of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
toward the left side of the scene" + } + ], + "images": [ + "images/task_00010_76_127_camera_top.jpg", + "images/task_00010_76_132_camera_top.jpg", + "images/task_00010_76_137_camera_top.jpg", + "images/task_00010_76_142_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00010", + "split": "eval", + "split_group_id": "task_00010__episode_000076", + "recording_id": null, + "episode_id": 76, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00010_95_1039_camera_top.jpg", + "images/task_00010_95_1042_camera_top.jpg", + "images/task_00010_95_1045_camera_top.jpg", + "images/task_00010_95_1048_camera_top.jpg", + "images/task_00010_95_1051_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00010", + "split": "eval", + "split_group_id": "task_00010__episode_000095", + "recording_id": null, + "episode_id": 95, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00010_106_392_camera_top.jpg", + "images/task_00010_106_395_camera_top.jpg", + "images/task_00010_106_398_camera_top.jpg", + "images/task_00010_106_401_camera_top.jpg", + "images/task_00010_106_404_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00010", + "split": "eval", + "split_group_id": "task_00010__episode_000106", + "recording_id": null, + "episode_id": 106, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00010_108_318_camera_top_t-6.jpg", + "images/task_00010_108_321_camera_top_t-3.jpg", + "images/task_00010_108_324_camera_top_t0.jpg", + "images/task_00010_108_327_camera_top_t+3.jpg", + "images/task_00010_108_330_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00010", + "split": "eval", + "split_group_id": "task_00010__episode_000108", + "recording_id": null, + "episode_id": 108, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. hold and carry\nC. approach\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
approach" + } + ], + "images": [ + "images/task_00010_108_567_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00010", + "split": "eval", + "split_group_id": "task_00010__episode_000108", + "recording_id": null, + "episode_id": 108, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00010_108_1095_camera_top_t-6.jpg", + "images/task_00010_108_1098_camera_top_t-3.jpg", + "images/task_00010_108_1101_camera_top_t0.jpg", + "images/task_00010_108_1104_camera_top_t+3.jpg", + "images/task_00010_108_1107_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00010", + "split": "eval", + "split_group_id": "task_00010__episode_000108", + "recording_id": null, + "episode_id": 108, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_00010_108_178_camera_top.jpg", + "images/task_00010_108_275_camera_top.jpg", + "images/task_00010_108_809_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00010", + "split": "eval", + "split_group_id": "task_00010__episode_000108", + "recording_id": null, + "episode_id": 108, + "camera": "camera_top", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00010_108_255_camera_top.jpg", + "images/task_00010_108_258_camera_top.jpg", + "images/task_00010_108_261_camera_top.jpg", + "images/task_00010_108_264_camera_top.jpg", + "images/task_00010_108_267_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00010", + "split": "eval", + "split_group_id": "task_00010__episode_000108", + "recording_id": null, + "episode_id": 108, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the left side of the scene\nA. toward the top of the scene\nC. toward the bottom of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
toward the left side of the scene" + } + ], + "images": [ + "images/task_00010_108_418_camera_top.jpg", + "images/task_00010_108_423_camera_top.jpg", + "images/task_00010_108_428_camera_top.jpg", + "images/task_00010_108_433_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00010", + "split": "eval", + "split_group_id": "task_00010__episode_000108", + "recording_id": null, + "episode_id": 108, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the left side of the scene\nB. toward the top of the scene\nC. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. toward the right side of the scene" + } + ], + "images": [ + "images/task_00010_127_1123_camera_top.jpg", + "images/task_00010_127_1128_camera_top.jpg", + "images/task_00010_127_1133_camera_top.jpg", + "images/task_00010_127_1138_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00010", + "split": "eval", + "split_group_id": "task_00010__episode_000127", + "recording_id": null, + "episode_id": 127, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. transfer\nC. pre-approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
pre-approach" + } + ], + "images": [ + "images/task_00011_5_596_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00011", + "split": "eval", + "split_group_id": "task_00011__episode_000005", + "recording_id": null, + "episode_id": 5, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. release\nC. approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. release" + } + ], + "images": [ + "images/task_00011_41_452_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00011", + "split": "eval", + "split_group_id": "task_00011__episode_000041", + "recording_id": null, + "episode_id": 41, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/task_00011_41_205_camera_top.jpg", + "images/task_00011_41_122_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00011", + "split": "eval", + "split_group_id": "task_00011__episode_000041", + "recording_id": null, + "episode_id": 41, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00011_41_43_camera_top.jpg", + "images/task_00011_41_46_camera_top.jpg", + "images/task_00011_41_49_camera_top.jpg", + "images/task_00011_41_52_camera_top.jpg", + "images/task_00011_41_55_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00011", + "split": "eval", + "split_group_id": "task_00011__episode_000041", + "recording_id": null, + "episode_id": 41, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00011_41_165_camera_top.jpg", + "images/task_00011_41_168_camera_top.jpg", + "images/task_00011_41_171_camera_top.jpg", + "images/task_00011_41_174_camera_top.jpg", + "images/task_00011_41_177_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00011", + "split": "eval", + "split_group_id": "task_00011__episode_000041", + "recording_id": null, + "episode_id": 41, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. hold and carry\nC. approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. approach" + } + ], + "images": [ + "images/task_00011_41_125_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00011", + "split": "eval", + "split_group_id": "task_00011__episode_000041", + "recording_id": null, + "episode_id": 41, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/task_00011_41_135_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00011", + "split": "eval", + "split_group_id": "task_00011__episode_000041", + "recording_id": null, + "episode_id": 41, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. 
Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/task_00011_41_235_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00011", + "split": "eval", + "split_group_id": "task_00011__episode_000041", + "recording_id": null, + "episode_id": 41, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. contact\nC. transfer\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. release" + } + ], + "images": [ + "images/task_00011_41_494_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00011", + "split": "eval", + "split_group_id": "task_00011__episode_000041", + "recording_id": null, + "episode_id": 41, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00011_41_85_camera_top.jpg", + "images/task_00011_41_88_camera_top.jpg", + "images/task_00011_41_91_camera_top.jpg", + "images/task_00011_41_94_camera_top.jpg", + "images/task_00011_41_97_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00011", + "split": "eval", + "split_group_id": "task_00011__episode_000041", + "recording_id": null, + "episode_id": 41, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/task_00011_41_116_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00011", + "split": "eval", + "split_group_id": "task_00011__episode_000041", + "recording_id": null, + "episode_id": 41, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. release\nC. pre-approach\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
approach" + } + ], + "images": [ + "images/task_00011_51_20_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00011", + "split": "eval", + "split_group_id": "task_00011__episode_000051", + "recording_id": null, + "episode_id": 51, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nC. toward the left side of the scene\nA. toward the top of the scene\nB. toward the bottom of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. toward the left side of the scene" + } + ], + "images": [ + "images/task_00011_62_114_camera_top.jpg", + "images/task_00011_62_119_camera_top.jpg", + "images/task_00011_62_124_camera_top.jpg", + "images/task_00011_62_129_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00011", + "split": "eval", + "split_group_id": "task_00011__episode_000062", + "recording_id": null, + "episode_id": 62, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. hold and carry\nC. contact\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
pre-approach" + } + ], + "images": [ + "images/task_00011_76_485_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00011", + "split": "eval", + "split_group_id": "task_00011__episode_000076", + "recording_id": null, + "episode_id": 76, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nC. toward the right side of the scene\nA. toward the top of the scene\nB. toward the left side of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. toward the right side of the scene" + } + ], + "images": [ + "images/task_00011_90_1151_camera_top.jpg", + "images/task_00011_90_1156_camera_top.jpg", + "images/task_00011_90_1161_camera_top.jpg", + "images/task_00011_90_1166_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00011", + "split": "eval", + "split_group_id": "task_00011__episode_000090", + "recording_id": null, + "episode_id": 90, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_00012_184_169_camera_top.jpg", + "images/task_00012_184_137_camera_top.jpg", + "images/task_00012_184_324_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00012", + "split": "eval", + "split_group_id": "task_00012__episode_000184", + "recording_id": null, + "episode_id": 184, + "camera": "camera_top", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. both arms are active" + } + ], + "images": [ + "images/task_00012_184_398_camera_top.jpg", + "images/task_00012_184_401_camera_top.jpg", + "images/task_00012_184_404_camera_top.jpg", + "images/task_00012_184_407_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00012", + "split": "eval", + "split_group_id": "task_00012__episode_000184", + "recording_id": null, + "episode_id": 184, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/task_00012_184_296_camera_top.jpg", + "images/task_00012_184_144_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00012", + "split": "eval", + "split_group_id": "task_00012__episode_000184", + "recording_id": null, + "episode_id": 184, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nC. toward the left side of the scene\nA. toward the bottom of the scene\nB. toward the right side of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. toward the left side of the scene" + } + ], + "images": [ + "images/task_00012_184_392_camera_top.jpg", + "images/task_00012_184_397_camera_top.jpg", + "images/task_00012_184_402_camera_top.jpg", + "images/task_00012_184_407_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00012", + "split": "eval", + "split_group_id": "task_00012__episode_000184", + "recording_id": null, + "episode_id": 184, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_00012_184_667_camera_top.jpg", + "images/task_00012_184_136_camera_top.jpg", + "images/task_00012_184_240_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00012", + "split": "eval", + "split_group_id": "task_00012__episode_000184", + "recording_id": null, + "episode_id": 184, + "camera": "camera_top", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00012_184_339_camera_top.jpg", + "images/task_00012_184_342_camera_top.jpg", + "images/task_00012_184_345_camera_top.jpg", + "images/task_00012_184_348_camera_top.jpg", + "images/task_00012_184_351_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00012", + "split": "eval", + "split_group_id": "task_00012__episode_000184", + "recording_id": null, + "episode_id": 184, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the left gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — no contact" + } + ], + "images": [ + "images/task_00012_184_422_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00012", + "split": "eval", + "split_group_id": "task_00012__episode_000184", + "recording_id": null, + "episode_id": 184, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the left gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/task_00012_184_196_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00012", + "split": "eval", + "split_group_id": "task_00012__episode_000184", + "recording_id": null, + "episode_id": 184, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00013_0_814_camera_top.jpg", + "images/task_00013_0_817_camera_top.jpg", + "images/task_00013_0_820_camera_top.jpg", + "images/task_00013_0_823_camera_top.jpg", + "images/task_00013_0_826_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00013", + "split": "eval", + "split_group_id": "task_00013__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00013_0_374_camera_top_t-6.jpg", + "images/task_00013_0_377_camera_top_t-3.jpg", + "images/task_00013_0_380_camera_top_t0.jpg", + "images/task_00013_0_383_camera_top_t+3.jpg", + "images/task_00013_0_386_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00013", + "split": "eval", + "split_group_id": "task_00013__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nC. toward the top of the scene\nA. toward the right side of the scene\nB. toward the left side of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
toward the top of the scene" + } + ], + "images": [ + "images/task_00013_0_1425_camera_top.jpg", + "images/task_00013_0_1430_camera_top.jpg", + "images/task_00013_0_1435_camera_top.jpg", + "images/task_00013_0_1440_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00013", + "split": "eval", + "split_group_id": "task_00013__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_00013_0_322_camera_top.jpg", + "images/task_00013_0_1306_camera_top.jpg", + "images/task_00013_0_624_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00013", + "split": "eval", + "split_group_id": "task_00013__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. approach\nC. hold and carry\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. pre-approach" + } + ], + "images": [ + "images/task_00013_0_264_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00013", + "split": "eval", + "split_group_id": "task_00013__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00013_0_521_camera_top.jpg", + "images/task_00013_0_524_camera_top.jpg", + "images/task_00013_0_527_camera_top.jpg", + "images/task_00013_0_530_camera_top.jpg", + "images/task_00013_0_533_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00013", + "split": "eval", + "split_group_id": "task_00013__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00013_50_622_camera_top.jpg", + "images/task_00013_50_625_camera_top.jpg", + "images/task_00013_50_628_camera_top.jpg", + "images/task_00013_50_631_camera_top.jpg", + "images/task_00013_50_634_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00013", + "split": "eval", + "split_group_id": "task_00013__episode_000050", + "recording_id": null, + "episode_id": 50, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. pre-approach\nC. contact\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. contact" + } + ], + "images": [ + "images/task_00013_93_100_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00013", + "split": "eval", + "split_group_id": "task_00013__episode_000093", + "recording_id": null, + "episode_id": 93, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_00014_10_357_camera_top.jpg", + "images/task_00014_10_1471_camera_top.jpg", + "images/task_00014_10_802_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00014", + "split": "eval", + "split_group_id": "task_00014__episode_000010", + "recording_id": null, + "episode_id": 10, + "camera": "camera_top", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00014_15_449_camera_top.jpg", + "images/task_00014_15_452_camera_top.jpg", + "images/task_00014_15_455_camera_top.jpg", + "images/task_00014_15_458_camera_top.jpg", + "images/task_00014_15_461_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00014", + "split": "eval", + "split_group_id": "task_00014__episode_000015", + "recording_id": null, + "episode_id": 15, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00014_15_293_camera_top.jpg", + "images/task_00014_15_296_camera_top.jpg", + "images/task_00014_15_299_camera_top.jpg", + "images/task_00014_15_302_camera_top.jpg", + "images/task_00014_15_305_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00014", + "split": "eval", + "split_group_id": "task_00014__episode_000015", + "recording_id": null, + "episode_id": 15, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_00014_15_265_camera_top.jpg", + "images/task_00014_15_1268_camera_top.jpg", + "images/task_00014_15_583_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00014", + "split": "eval", + "split_group_id": "task_00014__episode_000015", + "recording_id": null, + "episode_id": 15, + "camera": "camera_top", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00014_15_1000_camera_top.jpg", + "images/task_00014_15_1003_camera_top.jpg", + "images/task_00014_15_1006_camera_top.jpg", + "images/task_00014_15_1009_camera_top.jpg", + "images/task_00014_15_1012_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00014", + "split": "eval", + "split_group_id": "task_00014__episode_000015", + "recording_id": null, + "episode_id": 15, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00014_15_283_camera_top.jpg", + "images/task_00014_15_286_camera_top.jpg", + "images/task_00014_15_289_camera_top.jpg", + "images/task_00014_15_292_camera_top.jpg", + "images/task_00014_15_295_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00014", + "split": "eval", + "split_group_id": "task_00014__episode_000015", + "recording_id": null, + "episode_id": 15, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_00014_15_1278_camera_top.jpg", + "images/task_00014_15_553_camera_top.jpg", + "images/task_00014_15_1658_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00014", + "split": "eval", + "split_group_id": "task_00014__episode_000015", + "recording_id": null, + "episode_id": 15, + "camera": "camera_top", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_00014_15_1413_camera_top.jpg", + "images/task_00014_15_941_camera_top.jpg", + "images/task_00014_15_267_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00014", + "split": "eval", + "split_group_id": "task_00014__episode_000015", + "recording_id": null, + "episode_id": 15, + "camera": "camera_top", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. release\nC. contact\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. approach" + } + ], + "images": [ + "images/task_00014_44_432_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00014", + "split": "eval", + "split_group_id": "task_00014__episode_000044", + "recording_id": null, + "episode_id": 44, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. approach\nC. hold and carry\nD. 
transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. approach" + } + ], + "images": [ + "images/task_00014_44_465_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00014", + "split": "eval", + "split_group_id": "task_00014__episode_000044", + "recording_id": null, + "episode_id": 44, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. pre-approach\nC. contact\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. contact" + } + ], + "images": [ + "images/task_00014_44_134_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00014", + "split": "eval", + "split_group_id": "task_00014__episode_000044", + "recording_id": null, + "episode_id": 44, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. release\nC. contact\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. release" + } + ], + "images": [ + "images/task_00014_63_1179_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00014", + "split": "eval", + "split_group_id": "task_00014__episode_000063", + "recording_id": null, + "episode_id": 63, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. release\nC. hold and carry\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
pre-approach" + } + ], + "images": [ + "images/task_00014_63_286_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00014", + "split": "eval", + "split_group_id": "task_00014__episode_000063", + "recording_id": null, + "episode_id": 63, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. transfer\nC. contact\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. transfer" + } + ], + "images": [ + "images/task_00014_63_403_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00014", + "split": "eval", + "split_group_id": "task_00014__episode_000063", + "recording_id": null, + "episode_id": 63, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. contact\nC. release\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. release" + } + ], + "images": [ + "images/task_00014_63_744_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00014", + "split": "eval", + "split_group_id": "task_00014__episode_000063", + "recording_id": null, + "episode_id": 63, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. release\nC. hold and carry\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
release" + } + ], + "images": [ + "images/task_00014_63_861_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00014", + "split": "eval", + "split_group_id": "task_00014__episode_000063", + "recording_id": null, + "episode_id": 63, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. approach\nC. pre-approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. hold and carry" + } + ], + "images": [ + "images/task_00014_63_330_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00014", + "split": "eval", + "split_group_id": "task_00014__episode_000063", + "recording_id": null, + "episode_id": 63, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_00014_63_521_camera_top.jpg", + "images/task_00014_63_1301_camera_top.jpg", + "images/task_00014_63_324_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00014", + "split": "eval", + "split_group_id": "task_00014__episode_000063", + "recording_id": null, + "episode_id": 63, + "camera": "camera_top", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00014_107_553_camera_top.jpg", + "images/task_00014_107_556_camera_top.jpg", + "images/task_00014_107_559_camera_top.jpg", + "images/task_00014_107_562_camera_top.jpg", + "images/task_00014_107_565_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00014", + "split": "eval", + "split_group_id": "task_00014__episode_000107", + "recording_id": null, + "episode_id": 107, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_00014_107_764_camera_top.jpg", + "images/task_00014_107_415_camera_top.jpg", + "images/task_00014_107_628_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00014", + "split": "eval", + "split_group_id": "task_00014__episode_000107", + "recording_id": null, + "episode_id": 107, + "camera": "camera_top", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00014_107_1311_camera_top_t-6.jpg", + "images/task_00014_107_1314_camera_top_t-3.jpg", + "images/task_00014_107_1317_camera_top_t0.jpg", + "images/task_00014_107_1320_camera_top_t+3.jpg", + "images/task_00014_107_1323_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00014", + "split": "eval", + "split_group_id": "task_00014__episode_000107", + "recording_id": null, + "episode_id": 107, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_00014_107_660_camera_top.jpg", + "images/task_00014_107_1400_camera_top.jpg", + "images/task_00014_107_395_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00014", + "split": "eval", + "split_group_id": "task_00014__episode_000107", + "recording_id": null, + "episode_id": 107, + "camera": "camera_top", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00014_107_702_camera_top.jpg", + "images/task_00014_107_705_camera_top.jpg", + "images/task_00014_107_708_camera_top.jpg", + "images/task_00014_107_711_camera_top.jpg", + "images/task_00014_107_714_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00014", + "split": "eval", + "split_group_id": "task_00014__episode_000107", + "recording_id": null, + "episode_id": 107, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nC. toward the left side of the scene\nA. toward the top of the scene\nB. toward the right side of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. toward the left side of the scene" + } + ], + "images": [ + "images/task_00014_107_486_camera_top.jpg", + "images/task_00014_107_491_camera_top.jpg", + "images/task_00014_107_496_camera_top.jpg", + "images/task_00014_107_501_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00014", + "split": "eval", + "split_group_id": "task_00014__episode_000107", + "recording_id": null, + "episode_id": 107, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the left side of the scene\nA. toward the bottom of the scene\nC. toward the right side of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
toward the left side of the scene" + } + ], + "images": [ + "images/task_00015_7_155_camera_top.jpg", + "images/task_00015_7_160_camera_top.jpg", + "images/task_00015_7_165_camera_top.jpg", + "images/task_00015_7_170_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00015", + "split": "eval", + "split_group_id": "task_00015__episode_000007", + "recording_id": null, + "episode_id": 7, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. release\nC. transfer\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. release" + } + ], + "images": [ + "images/task_00015_31_837_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00015", + "split": "eval", + "split_group_id": "task_00015__episode_000031", + "recording_id": null, + "episode_id": 31, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
only the right arm is active" + } + ], + "images": [ + "images/task_00015_70_264_camera_top.jpg", + "images/task_00015_70_267_camera_top.jpg", + "images/task_00015_70_270_camera_top.jpg", + "images/task_00015_70_273_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00015", + "split": "eval", + "split_group_id": "task_00015__episode_000070", + "recording_id": null, + "episode_id": 70, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_00015_70_559_camera_top.jpg", + "images/task_00015_70_741_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00015", + "split": "eval", + "split_group_id": "task_00015__episode_000070", + "recording_id": null, + "episode_id": 70, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00015_70_546_camera_top_t-6.jpg", + "images/task_00015_70_549_camera_top_t-3.jpg", + "images/task_00015_70_552_camera_top_t0.jpg", + "images/task_00015_70_555_camera_top_t+3.jpg", + "images/task_00015_70_558_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00015", + "split": "eval", + "split_group_id": "task_00015__episode_000070", + "recording_id": null, + "episode_id": 70, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the right side of the scene\nA. toward the bottom of the scene\nC. toward the top of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. toward the right side of the scene" + } + ], + "images": [ + "images/task_00015_70_610_camera_top.jpg", + "images/task_00015_70_615_camera_top.jpg", + "images/task_00015_70_620_camera_top.jpg", + "images/task_00015_70_625_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00015", + "split": "eval", + "split_group_id": "task_00015__episode_000070", + "recording_id": null, + "episode_id": 70, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the left side of the scene\nB. toward the bottom of the scene\nC. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00015_70_293_camera_top.jpg", + "images/task_00015_70_298_camera_top.jpg", + "images/task_00015_70_303_camera_top.jpg", + "images/task_00015_70_308_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00015", + "split": "eval", + "split_group_id": "task_00015__episode_000070", + "recording_id": null, + "episode_id": 70, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_00015_70_722_camera_top.jpg", + "images/task_00015_70_518_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00015", + "split": "eval", + "split_group_id": "task_00015__episode_000070", + "recording_id": null, + "episode_id": 70, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_00015_70_684_camera_top.jpg", + "images/task_00015_70_385_camera_top.jpg", + "images/task_00015_70_766_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00015", + "split": "eval", + "split_group_id": "task_00015__episode_000070", + "recording_id": null, + "episode_id": 70, + "camera": "camera_top", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. both arms are idle" + } + ], + "images": [ + "images/task_00015_70_418_camera_top.jpg", + "images/task_00015_70_421_camera_top.jpg", + "images/task_00015_70_424_camera_top.jpg", + "images/task_00015_70_427_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00015", + "split": "eval", + "split_group_id": "task_00015__episode_000070", + "recording_id": null, + "episode_id": 70, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
only the right arm is active" + } + ], + "images": [ + "images/task_00015_70_263_camera_top.jpg", + "images/task_00015_70_266_camera_top.jpg", + "images/task_00015_70_269_camera_top.jpg", + "images/task_00015_70_272_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00015", + "split": "eval", + "split_group_id": "task_00015__episode_000070", + "recording_id": null, + "episode_id": 70, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. both arms are active" + } + ], + "images": [ + "images/task_00015_70_573_camera_top.jpg", + "images/task_00015_70_576_camera_top.jpg", + "images/task_00015_70_579_camera_top.jpg", + "images/task_00015_70_582_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00015", + "split": "eval", + "split_group_id": "task_00015__episode_000070", + "recording_id": null, + "episode_id": 70, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. release\nC. transfer\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
approach" + } + ], + "images": [ + "images/task_00015_101_776_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00015", + "split": "eval", + "split_group_id": "task_00015__episode_000101", + "recording_id": null, + "episode_id": 101, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nA. toward the right side of the scene\nB. toward the top of the scene\nC. toward the bottom of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. toward the right side of the scene" + } + ], + "images": [ + "images/task_00016_2_273_camera_top.jpg", + "images/task_00016_2_278_camera_top.jpg", + "images/task_00016_2_283_camera_top.jpg", + "images/task_00016_2_288_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00016", + "split": "eval", + "split_group_id": "task_00016__episode_000002", + "recording_id": null, + "episode_id": 2, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
only the left arm is active" + } + ], + "images": [ + "images/task_00016_2_249_camera_top.jpg", + "images/task_00016_2_252_camera_top.jpg", + "images/task_00016_2_255_camera_top.jpg", + "images/task_00016_2_258_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00016", + "split": "eval", + "split_group_id": "task_00016__episode_000002", + "recording_id": null, + "episode_id": 2, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. only the left arm is active" + } + ], + "images": [ + "images/task_00016_2_243_camera_top.jpg", + "images/task_00016_2_246_camera_top.jpg", + "images/task_00016_2_249_camera_top.jpg", + "images/task_00016_2_252_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00016", + "split": "eval", + "split_group_id": "task_00016__episode_000002", + "recording_id": null, + "episode_id": 2, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — no contact" + } + ], + "images": [ + "images/task_00016_2_1473_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00016", + "split": "eval", + "split_group_id": "task_00016__episode_000002", + "recording_id": null, + "episode_id": 2, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. transfer\nC. contact\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. contact" + } + ], + "images": [ + "images/task_00016_2_449_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00016", + "split": "eval", + "split_group_id": "task_00016__episode_000002", + "recording_id": null, + "episode_id": 2, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nC. toward the right side of the scene\nA. toward the bottom of the scene\nB. toward the left side of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00016_2_1547_camera_top.jpg", + "images/task_00016_2_1552_camera_top.jpg", + "images/task_00016_2_1557_camera_top.jpg", + "images/task_00016_2_1562_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00016", + "split": "eval", + "split_group_id": "task_00016__episode_000002", + "recording_id": null, + "episode_id": 2, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_00016_2_196_camera_top.jpg", + "images/task_00016_2_305_camera_top.jpg", + "images/task_00016_2_1486_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00016", + "split": "eval", + "split_group_id": "task_00016__episode_000002", + "recording_id": null, + "episode_id": 2, + "camera": "camera_top", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
only the left arm is active" + } + ], + "images": [ + "images/task_00016_2_248_camera_top.jpg", + "images/task_00016_2_251_camera_top.jpg", + "images/task_00016_2_254_camera_top.jpg", + "images/task_00016_2_257_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00016", + "split": "eval", + "split_group_id": "task_00016__episode_000002", + "recording_id": null, + "episode_id": 2, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_00016_2_195_camera_top.jpg", + "images/task_00016_2_753_camera_top.jpg", + "images/task_00016_2_1504_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00016", + "split": "eval", + "split_group_id": "task_00016__episode_000002", + "recording_id": null, + "episode_id": 2, + "camera": "camera_top", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/task_00016_97_251_camera_top.jpg", + "images/task_00016_97_360_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00016", + "split": "eval", + "split_group_id": "task_00016__episode_000097", + "recording_id": null, + "episode_id": 97, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00016_97_508_camera_top.jpg", + "images/task_00016_97_511_camera_top.jpg", + "images/task_00016_97_514_camera_top.jpg", + "images/task_00016_97_517_camera_top.jpg", + "images/task_00016_97_520_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00016", + "split": "eval", + "split_group_id": "task_00016__episode_000097", + "recording_id": null, + "episode_id": 97, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
only the right arm is active" + } + ], + "images": [ + "images/task_00016_97_321_camera_top.jpg", + "images/task_00016_97_324_camera_top.jpg", + "images/task_00016_97_327_camera_top.jpg", + "images/task_00016_97_330_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00016", + "split": "eval", + "split_group_id": "task_00016__episode_000097", + "recording_id": null, + "episode_id": 97, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. only the left arm is active" + } + ], + "images": [ + "images/task_00016_97_393_camera_top.jpg", + "images/task_00016_97_396_camera_top.jpg", + "images/task_00016_97_399_camera_top.jpg", + "images/task_00016_97_402_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00016", + "split": "eval", + "split_group_id": "task_00016__episode_000097", + "recording_id": null, + "episode_id": 97, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
both arms are active" + } + ], + "images": [ + "images/task_00016_97_1245_camera_top.jpg", + "images/task_00016_97_1248_camera_top.jpg", + "images/task_00016_97_1251_camera_top.jpg", + "images/task_00016_97_1254_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00016", + "split": "eval", + "split_group_id": "task_00016__episode_000097", + "recording_id": null, + "episode_id": 97, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00016_97_611_camera_top.jpg", + "images/task_00016_97_614_camera_top.jpg", + "images/task_00016_97_617_camera_top.jpg", + "images/task_00016_97_620_camera_top.jpg", + "images/task_00016_97_623_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00016", + "split": "eval", + "split_group_id": "task_00016__episode_000097", + "recording_id": null, + "episode_id": 97, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nB. toward the top of the scene\nA. toward the left side of the scene\nC. toward the right side of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
toward the top of the scene" + } + ], + "images": [ + "images/task_00016_97_1201_camera_top.jpg", + "images/task_00016_97_1206_camera_top.jpg", + "images/task_00016_97_1211_camera_top.jpg", + "images/task_00016_97_1216_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00016", + "split": "eval", + "split_group_id": "task_00016__episode_000097", + "recording_id": null, + "episode_id": 97, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00016_97_1113_camera_top_t-6.jpg", + "images/task_00016_97_1116_camera_top_t-3.jpg", + "images/task_00016_97_1119_camera_top_t0.jpg", + "images/task_00016_97_1122_camera_top_t+3.jpg", + "images/task_00016_97_1125_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00016", + "split": "eval", + "split_group_id": "task_00016__episode_000097", + "recording_id": null, + "episode_id": 97, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00016_97_666_camera_top_t-6.jpg", + "images/task_00016_97_669_camera_top_t-3.jpg", + "images/task_00016_97_672_camera_top_t0.jpg", + "images/task_00016_97_675_camera_top_t+3.jpg", + "images/task_00016_97_678_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00016", + "split": "eval", + "split_group_id": "task_00016__episode_000097", + "recording_id": null, + "episode_id": 97, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. approach\nC. release\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. contact" + } + ], + "images": [ + "images/task_00017_23_1280_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00017", + "split": "eval", + "split_group_id": "task_00017__episode_000023", + "recording_id": null, + "episode_id": 23, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. transfer\nC. approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
contact" + } + ], + "images": [ + "images/task_00017_23_1279_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00017", + "split": "eval", + "split_group_id": "task_00017__episode_000023", + "recording_id": null, + "episode_id": 23, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_00017_23_1571_camera_top.jpg", + "images/task_00017_23_1294_camera_top.jpg", + "images/task_00017_23_1259_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00017", + "split": "eval", + "split_group_id": "task_00017__episode_000023", + "recording_id": null, + "episode_id": 23, + "camera": "camera_top", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00017_23_1483_camera_top.jpg", + "images/task_00017_23_1486_camera_top.jpg", + "images/task_00017_23_1489_camera_top.jpg", + "images/task_00017_23_1492_camera_top.jpg", + "images/task_00017_23_1495_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00017", + "split": "eval", + "split_group_id": "task_00017__episode_000023", + "recording_id": null, + "episode_id": 23, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. transfer\nC. release\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. release" + } + ], + "images": [ + "images/task_00017_23_2336_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00017", + "split": "eval", + "split_group_id": "task_00017__episode_000023", + "recording_id": null, + "episode_id": 23, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00017_23_1607_camera_top.jpg", + "images/task_00017_23_1610_camera_top.jpg", + "images/task_00017_23_1613_camera_top.jpg", + "images/task_00017_23_1616_camera_top.jpg", + "images/task_00017_23_1619_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00017", + "split": "eval", + "split_group_id": "task_00017__episode_000023", + "recording_id": null, + "episode_id": 23, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nA. toward the top of the scene\nB. toward the right side of the scene\nC. toward the bottom of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. toward the top of the scene" + } + ], + "images": [ + "images/task_00017_23_2241_camera_top.jpg", + "images/task_00017_23_2246_camera_top.jpg", + "images/task_00017_23_2251_camera_top.jpg", + "images/task_00017_23_2256_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00017", + "split": "eval", + "split_group_id": "task_00017__episode_000023", + "recording_id": null, + "episode_id": 23, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00017_23_1744_camera_top.jpg", + "images/task_00017_23_1747_camera_top.jpg", + "images/task_00017_23_1750_camera_top.jpg", + "images/task_00017_23_1753_camera_top.jpg", + "images/task_00017_23_1756_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00017", + "split": "eval", + "split_group_id": "task_00017__episode_000023", + "recording_id": null, + "episode_id": 23, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. contact\nC. approach\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. approach" + } + ], + "images": [ + "images/task_00017_23_1248_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00017", + "split": "eval", + "split_group_id": "task_00017__episode_000023", + "recording_id": null, + "episode_id": 23, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nA. toward the bottom of the scene\nB. toward the left side of the scene\nC. toward the right side of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
toward the bottom of the scene" + } + ], + "images": [ + "images/task_00017_25_1078_camera_top.jpg", + "images/task_00017_25_1083_camera_top.jpg", + "images/task_00017_25_1088_camera_top.jpg", + "images/task_00017_25_1093_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00017", + "split": "eval", + "split_group_id": "task_00017__episode_000025", + "recording_id": null, + "episode_id": 25, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nB. toward the top of the scene\nA. toward the bottom of the scene\nC. toward the left side of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. toward the top of the scene" + } + ], + "images": [ + "images/task_00017_25_1950_camera_top.jpg", + "images/task_00017_25_1955_camera_top.jpg", + "images/task_00017_25_1960_camera_top.jpg", + "images/task_00017_25_1965_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00017", + "split": "eval", + "split_group_id": "task_00017__episode_000025", + "recording_id": null, + "episode_id": 25, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00017_25_324_camera_top.jpg", + "images/task_00017_25_327_camera_top.jpg", + "images/task_00017_25_330_camera_top.jpg", + "images/task_00017_25_333_camera_top.jpg", + "images/task_00017_25_336_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00017", + "split": "eval", + "split_group_id": "task_00017__episode_000025", + "recording_id": null, + "episode_id": 25, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. hold and carry\nC. release\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. transfer" + } + ], + "images": [ + "images/task_00017_25_690_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00017", + "split": "eval", + "split_group_id": "task_00017__episode_000025", + "recording_id": null, + "episode_id": 25, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nB. toward the bottom of the scene\nA. toward the top of the scene\nC. toward the left side of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
toward the bottom of the scene" + } + ], + "images": [ + "images/task_00017_25_1071_camera_top.jpg", + "images/task_00017_25_1076_camera_top.jpg", + "images/task_00017_25_1081_camera_top.jpg", + "images/task_00017_25_1086_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00017", + "split": "eval", + "split_group_id": "task_00017__episode_000025", + "recording_id": null, + "episode_id": 25, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_00017_25_241_camera_top.jpg", + "images/task_00017_25_341_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00017", + "split": "eval", + "split_group_id": "task_00017__episode_000025", + "recording_id": null, + "episode_id": 25, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_00017_25_721_camera_top.jpg", + "images/task_00017_25_280_camera_top.jpg", + "images/task_00017_25_232_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00017", + "split": "eval", + "split_group_id": "task_00017__episode_000025", + "recording_id": null, + "episode_id": 25, + "camera": "camera_top", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_00017_25_488_camera_top.jpg", + "images/task_00017_25_725_camera_top.jpg", + "images/task_00017_25_252_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00017", + "split": "eval", + "split_group_id": "task_00017__episode_000025", + "recording_id": null, + "episode_id": 25, + "camera": "camera_top", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00017_25_581_camera_top_t-6.jpg", + "images/task_00017_25_584_camera_top_t-3.jpg", + "images/task_00017_25_587_camera_top_t0.jpg", + "images/task_00017_25_590_camera_top_t+3.jpg", + "images/task_00017_25_593_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00017", + "split": "eval", + "split_group_id": "task_00017__episode_000025", + "recording_id": null, + "episode_id": 25, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_00017_25_731_camera_top.jpg", + "images/task_00017_25_1433_camera_top.jpg", + "images/task_00017_25_226_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00017", + "split": "eval", + "split_group_id": "task_00017__episode_000025", + "recording_id": null, + "episode_id": 25, + "camera": "camera_top", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nD. toward the top of the scene\nA. toward the right side of the scene\nB. toward the left side of the scene\nC. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
toward the top of the scene" + } + ], + "images": [ + "images/task_00017_25_1943_camera_top.jpg", + "images/task_00017_25_1948_camera_top.jpg", + "images/task_00017_25_1953_camera_top.jpg", + "images/task_00017_25_1958_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00017", + "split": "eval", + "split_group_id": "task_00017__episode_000025", + "recording_id": null, + "episode_id": 25, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00017_31_1197_camera_top_t-6.jpg", + "images/task_00017_31_1200_camera_top_t-3.jpg", + "images/task_00017_31_1203_camera_top_t0.jpg", + "images/task_00017_31_1206_camera_top_t+3.jpg", + "images/task_00017_31_1209_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00017", + "split": "eval", + "split_group_id": "task_00017__episode_000031", + "recording_id": null, + "episode_id": 31, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00017_31_1865_camera_top_t-6.jpg", + "images/task_00017_31_1868_camera_top_t-3.jpg", + "images/task_00017_31_1871_camera_top_t0.jpg", + "images/task_00017_31_1874_camera_top_t+3.jpg", + "images/task_00017_31_1877_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00017", + "split": "eval", + "split_group_id": "task_00017__episode_000031", + "recording_id": null, + "episode_id": 31, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_00017_31_1199_camera_top.jpg", + "images/task_00017_31_1735_camera_top.jpg", + "images/task_00017_31_1328_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00017", + "split": "eval", + "split_group_id": "task_00017__episode_000031", + "recording_id": null, + "episode_id": 31, + "camera": "camera_top", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/task_00017_31_1847_camera_top.jpg", + "images/task_00017_31_1678_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00017", + "split": "eval", + "split_group_id": "task_00017__episode_000031", + "recording_id": null, + "episode_id": 31, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. contact\nC. transfer\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. approach" + } + ], + "images": [ + "images/task_00017_31_1785_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00017", + "split": "eval", + "split_group_id": "task_00017__episode_000031", + "recording_id": null, + "episode_id": 31, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/task_00017_31_1796_camera_top.jpg", + "images/task_00017_31_1610_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00017", + "split": "eval", + "split_group_id": "task_00017__episode_000031", + "recording_id": null, + "episode_id": 31, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00017_31_1614_camera_top.jpg", + "images/task_00017_31_1617_camera_top.jpg", + "images/task_00017_31_1620_camera_top.jpg", + "images/task_00017_31_1623_camera_top.jpg", + "images/task_00017_31_1626_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00017", + "split": "eval", + "split_group_id": "task_00017__episode_000031", + "recording_id": null, + "episode_id": 31, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_00017_31_1185_camera_top.jpg", + "images/task_00017_31_1731_camera_top.jpg", + "images/task_00017_31_1863_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00017", + "split": "eval", + "split_group_id": "task_00017__episode_000031", + "recording_id": null, + "episode_id": 31, + "camera": "camera_top", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_00017_31_1205_camera_top.jpg", + "images/task_00017_31_1768_camera_top.jpg", + "images/task_00017_31_1329_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00017", + "split": "eval", + "split_group_id": "task_00017__episode_000031", + "recording_id": null, + "episode_id": 31, + "camera": "camera_top", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. hold and carry\nC. approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. pre-approach" + } + ], + "images": [ + "images/task_00017_31_1127_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00017", + "split": "eval", + "split_group_id": "task_00017__episode_000031", + "recording_id": null, + "episode_id": 31, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. hold and carry\nC. contact\nD. 
release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. release" + } + ], + "images": [ + "images/task_00017_31_1898_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00017", + "split": "eval", + "split_group_id": "task_00017__episode_000031", + "recording_id": null, + "episode_id": 31, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. contact\nC. transfer\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. hold and carry" + } + ], + "images": [ + "images/task_00017_31_1286_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00017", + "split": "eval", + "split_group_id": "task_00017__episode_000031", + "recording_id": null, + "episode_id": 31, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. contact\nC. hold and carry\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. transfer" + } + ], + "images": [ + "images/task_00017_57_699_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00017", + "split": "eval", + "split_group_id": "task_00017__episode_000057", + "recording_id": null, + "episode_id": 57, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. 
Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00017_57_582_camera_top.jpg", + "images/task_00017_57_585_camera_top.jpg", + "images/task_00017_57_588_camera_top.jpg", + "images/task_00017_57_591_camera_top.jpg", + "images/task_00017_57_594_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00017", + "split": "eval", + "split_group_id": "task_00017__episode_000057", + "recording_id": null, + "episode_id": 57, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nB. toward the bottom of the scene\nA. toward the right side of the scene\nC. toward the left side of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. toward the bottom of the scene" + } + ], + "images": [ + "images/task_00017_57_1032_camera_top.jpg", + "images/task_00017_57_1037_camera_top.jpg", + "images/task_00017_57_1042_camera_top.jpg", + "images/task_00017_57_1047_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00017", + "split": "eval", + "split_group_id": "task_00017__episode_000057", + "recording_id": null, + "episode_id": 57, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_00017_57_352_camera_top.jpg", + "images/task_00017_57_960_camera_top.jpg", + "images/task_00017_57_250_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00017", + "split": "eval", + "split_group_id": "task_00017__episode_000057", + "recording_id": null, + "episode_id": 57, + "camera": "camera_top", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00017_57_366_camera_top.jpg", + "images/task_00017_57_369_camera_top.jpg", + "images/task_00017_57_372_camera_top.jpg", + "images/task_00017_57_375_camera_top.jpg", + "images/task_00017_57_378_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00017", + "split": "eval", + "split_group_id": "task_00017__episode_000057", + "recording_id": null, + "episode_id": 57, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. hold and carry\nC. transfer\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
hold and carry" + } + ], + "images": [ + "images/task_00017_57_555_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00017", + "split": "eval", + "split_group_id": "task_00017__episode_000057", + "recording_id": null, + "episode_id": 57, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nB. toward the right side of the scene\nA. toward the left side of the scene\nC. toward the bottom of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. toward the right side of the scene" + } + ], + "images": [ + "images/task_00017_57_1788_camera_top.jpg", + "images/task_00017_57_1793_camera_top.jpg", + "images/task_00017_57_1798_camera_top.jpg", + "images/task_00017_57_1803_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00017", + "split": "eval", + "split_group_id": "task_00017__episode_000057", + "recording_id": null, + "episode_id": 57, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/task_00017_57_221_camera_top.jpg", + "images/task_00017_57_345_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00017", + "split": "eval", + "split_group_id": "task_00017__episode_000057", + "recording_id": null, + "episode_id": 57, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00017_69_592_camera_top.jpg", + "images/task_00017_69_595_camera_top.jpg", + "images/task_00017_69_598_camera_top.jpg", + "images/task_00017_69_601_camera_top.jpg", + "images/task_00017_69_604_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00017", + "split": "eval", + "split_group_id": "task_00017__episode_000069", + "recording_id": null, + "episode_id": 69, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. transfer\nC. release\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
pre-approach" + } + ], + "images": [ + "images/task_00017_69_169_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00017", + "split": "eval", + "split_group_id": "task_00017__episode_000069", + "recording_id": null, + "episode_id": 69, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. release\nC. transfer\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. release" + } + ], + "images": [ + "images/task_00017_69_892_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00017", + "split": "eval", + "split_group_id": "task_00017__episode_000069", + "recording_id": null, + "episode_id": 69, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00017_69_433_camera_top.jpg", + "images/task_00017_69_436_camera_top.jpg", + "images/task_00017_69_439_camera_top.jpg", + "images/task_00017_69_442_camera_top.jpg", + "images/task_00017_69_445_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00017", + "split": "eval", + "split_group_id": "task_00017__episode_000069", + "recording_id": null, + "episode_id": 69, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the right side of the scene\nA. toward the left side of the scene\nC. toward the top of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. toward the right side of the scene" + } + ], + "images": [ + "images/task_00017_69_1954_camera_top.jpg", + "images/task_00017_69_1959_camera_top.jpg", + "images/task_00017_69_1964_camera_top.jpg", + "images/task_00017_69_1969_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00017", + "split": "eval", + "split_group_id": "task_00017__episode_000069", + "recording_id": null, + "episode_id": 69, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00017_69_1714_camera_top_t-6.jpg", + "images/task_00017_69_1717_camera_top_t-3.jpg", + "images/task_00017_69_1720_camera_top_t0.jpg", + "images/task_00017_69_1723_camera_top_t+3.jpg", + "images/task_00017_69_1726_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00017", + "split": "eval", + "split_group_id": "task_00017__episode_000069", + "recording_id": null, + "episode_id": 69, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_00017_69_224_camera_top.jpg", + "images/task_00017_69_526_camera_top.jpg", + "images/task_00017_69_740_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00017", + "split": "eval", + "split_group_id": "task_00017__episode_000069", + "recording_id": null, + "episode_id": 69, + "camera": "camera_top", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. release\nC. contact\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
contact" + } + ], + "images": [ + "images/task_00017_77_1174_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00017", + "split": "eval", + "split_group_id": "task_00017__episode_000077", + "recording_id": null, + "episode_id": 77, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_00017_77_2019_camera_top.jpg", + "images/task_00017_77_1201_camera_top.jpg", + "images/task_00017_77_1864_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00017", + "split": "eval", + "split_group_id": "task_00017__episode_000077", + "recording_id": null, + "episode_id": 77, + "camera": "camera_top", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00017_77_1898_camera_top.jpg", + "images/task_00017_77_1901_camera_top.jpg", + "images/task_00017_77_1904_camera_top.jpg", + "images/task_00017_77_1907_camera_top.jpg", + "images/task_00017_77_1910_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00017", + "split": "eval", + "split_group_id": "task_00017__episode_000077", + "recording_id": null, + "episode_id": 77, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_00017_77_1742_camera_top.jpg", + "images/task_00017_77_1151_camera_top.jpg", + "images/task_00017_77_1700_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00017", + "split": "eval", + "split_group_id": "task_00017__episode_000077", + "recording_id": null, + "episode_id": 77, + "camera": "camera_top", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. transfer\nC. contact\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
contact" + } + ], + "images": [ + "images/task_00017_77_1922_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00017", + "split": "eval", + "split_group_id": "task_00017__episode_000077", + "recording_id": null, + "episode_id": 77, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. hold and carry\nC. approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. approach" + } + ], + "images": [ + "images/task_00017_77_1783_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00017", + "split": "eval", + "split_group_id": "task_00017__episode_000077", + "recording_id": null, + "episode_id": 77, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the left side of the scene\nA. toward the bottom of the scene\nC. toward the top of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
toward the left side of the scene" + } + ], + "images": [ + "images/task_00017_77_1637_camera_top.jpg", + "images/task_00017_77_1642_camera_top.jpg", + "images/task_00017_77_1647_camera_top.jpg", + "images/task_00017_77_1652_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00017", + "split": "eval", + "split_group_id": "task_00017__episode_000077", + "recording_id": null, + "episode_id": 77, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. approach\nC. release\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. pre-approach" + } + ], + "images": [ + "images/task_00017_77_1125_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00017", + "split": "eval", + "split_group_id": "task_00017__episode_000077", + "recording_id": null, + "episode_id": 77, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00017_77_1815_camera_top.jpg", + "images/task_00017_77_1818_camera_top.jpg", + "images/task_00017_77_1821_camera_top.jpg", + "images/task_00017_77_1824_camera_top.jpg", + "images/task_00017_77_1827_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00017", + "split": "eval", + "split_group_id": "task_00017__episode_000077", + "recording_id": null, + "episode_id": 77, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00017_89_867_camera_top_t-6.jpg", + "images/task_00017_89_870_camera_top_t-3.jpg", + "images/task_00017_89_873_camera_top_t0.jpg", + "images/task_00017_89_876_camera_top_t+3.jpg", + "images/task_00017_89_879_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00017", + "split": "eval", + "split_group_id": "task_00017__episode_000089", + "recording_id": null, + "episode_id": 89, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_00017_89_273_camera_top.jpg", + "images/task_00017_89_192_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00017", + "split": "eval", + "split_group_id": "task_00017__episode_000089", + "recording_id": null, + "episode_id": 89, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_00017_89_1522_camera_top.jpg", + "images/task_00017_89_394_camera_top.jpg", + "images/task_00017_89_196_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00017", + "split": "eval", + "split_group_id": "task_00017__episode_000089", + "recording_id": null, + "episode_id": 89, + "camera": "camera_top", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/task_00017_89_412_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00017", + "split": "eval", + "split_group_id": "task_00017__episode_000089", + "recording_id": null, + "episode_id": 89, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the right side of the scene\nA. toward the top of the scene\nC. 
toward the bottom of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. toward the right side of the scene" + } + ], + "images": [ + "images/task_00017_89_1892_camera_top.jpg", + "images/task_00017_89_1897_camera_top.jpg", + "images/task_00017_89_1902_camera_top.jpg", + "images/task_00017_89_1907_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00017", + "split": "eval", + "split_group_id": "task_00017__episode_000089", + "recording_id": null, + "episode_id": 89, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_00017_89_170_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00017", + "split": "eval", + "split_group_id": "task_00017__episode_000089", + "recording_id": null, + "episode_id": 89, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00017_89_1429_camera_top_t-6.jpg", + "images/task_00017_89_1432_camera_top_t-3.jpg", + "images/task_00017_89_1435_camera_top_t0.jpg", + "images/task_00017_89_1438_camera_top_t+3.jpg", + "images/task_00017_89_1441_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00017", + "split": "eval", + "split_group_id": "task_00017__episode_000089", + "recording_id": null, + "episode_id": 89, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the right side of the scene\nA. toward the left side of the scene\nC. toward the bottom of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. toward the right side of the scene" + } + ], + "images": [ + "images/task_00017_89_1899_camera_top.jpg", + "images/task_00017_89_1904_camera_top.jpg", + "images/task_00017_89_1909_camera_top.jpg", + "images/task_00017_89_1914_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00017", + "split": "eval", + "split_group_id": "task_00017__episode_000089", + "recording_id": null, + "episode_id": 89, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. contact\nC. transfer\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
release" + } + ], + "images": [ + "images/task_00018_2_964_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00018", + "split": "eval", + "split_group_id": "task_00018__episode_000002", + "recording_id": null, + "episode_id": 2, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. pre-approach\nC. approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. transfer" + } + ], + "images": [ + "images/task_00018_42_701_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00018", + "split": "eval", + "split_group_id": "task_00018__episode_000042", + "recording_id": null, + "episode_id": 42, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. transfer\nC. release\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. transfer" + } + ], + "images": [ + "images/task_00018_68_704_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00018", + "split": "eval", + "split_group_id": "task_00018__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. approach\nC. transfer\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
approach" + } + ], + "images": [ + "images/task_00018_115_452_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00018", + "split": "eval", + "split_group_id": "task_00018__episode_000115", + "recording_id": null, + "episode_id": 115, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00019_31_495_camera_top.jpg", + "images/task_00019_31_498_camera_top.jpg", + "images/task_00019_31_501_camera_top.jpg", + "images/task_00019_31_504_camera_top.jpg", + "images/task_00019_31_507_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00019", + "split": "eval", + "split_group_id": "task_00019__episode_000031", + "recording_id": null, + "episode_id": 31, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_00019_31_223_camera_top.jpg", + "images/task_00019_31_373_camera_top.jpg", + "images/task_00019_31_411_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00019", + "split": "eval", + "split_group_id": "task_00019__episode_000031", + "recording_id": null, + "episode_id": 31, + "camera": "camera_top", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00019_31_272_camera_top_t-6.jpg", + "images/task_00019_31_275_camera_top_t-3.jpg", + "images/task_00019_31_278_camera_top_t0.jpg", + "images/task_00019_31_281_camera_top_t+3.jpg", + "images/task_00019_31_284_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00019", + "split": "eval", + "split_group_id": "task_00019__episode_000031", + "recording_id": null, + "episode_id": 31, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00019_31_677_camera_top.jpg", + "images/task_00019_31_680_camera_top.jpg", + "images/task_00019_31_683_camera_top.jpg", + "images/task_00019_31_686_camera_top.jpg", + "images/task_00019_31_689_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00019", + "split": "eval", + "split_group_id": "task_00019__episode_000031", + "recording_id": null, + "episode_id": 31, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nD. toward the top of the scene\nA. toward the left side of the scene\nB. toward the right side of the scene\nC. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. toward the top of the scene" + } + ], + "images": [ + "images/task_00019_31_727_camera_top.jpg", + "images/task_00019_31_732_camera_top.jpg", + "images/task_00019_31_737_camera_top.jpg", + "images/task_00019_31_742_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00019", + "split": "eval", + "split_group_id": "task_00019__episode_000031", + "recording_id": null, + "episode_id": 31, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_00019_31_910_camera_top.jpg", + "images/task_00019_31_383_camera_top.jpg", + "images/task_00019_31_230_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00019", + "split": "eval", + "split_group_id": "task_00019__episode_000031", + "recording_id": null, + "episode_id": 31, + "camera": "camera_top", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/task_00019_31_379_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00019", + "split": "eval", + "split_group_id": "task_00019__episode_000031", + "recording_id": null, + "episode_id": 31, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00019_31_299_camera_top.jpg", + "images/task_00019_31_302_camera_top.jpg", + "images/task_00019_31_305_camera_top.jpg", + "images/task_00019_31_308_camera_top.jpg", + "images/task_00019_31_311_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00019", + "split": "eval", + "split_group_id": "task_00019__episode_000031", + "recording_id": null, + "episode_id": 31, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/task_00019_31_663_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00019", + "split": "eval", + "split_group_id": "task_00019__episode_000031", + "recording_id": null, + "episode_id": 31, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the left gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — contact established" + } + ], + "images": [ + "images/task_00019_66_430_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00019", + "split": "eval", + "split_group_id": "task_00019__episode_000066", + "recording_id": null, + "episode_id": 66, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_00019_66_335_camera_top.jpg", + "images/task_00019_66_112_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00019", + "split": "eval", + "split_group_id": "task_00019__episode_000066", + "recording_id": null, + "episode_id": 66, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00019_66_208_camera_top_t-6.jpg", + "images/task_00019_66_211_camera_top_t-3.jpg", + "images/task_00019_66_214_camera_top_t0.jpg", + "images/task_00019_66_217_camera_top_t+3.jpg", + "images/task_00019_66_220_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00019", + "split": "eval", + "split_group_id": "task_00019__episode_000066", + "recording_id": null, + "episode_id": 66, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_00019_66_342_camera_top.jpg", + "images/task_00019_66_1151_camera_top.jpg", + "images/task_00019_66_113_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00019", + "split": "eval", + "split_group_id": "task_00019__episode_000066", + "recording_id": null, + "episode_id": 66, + "camera": "camera_top", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00019_66_501_camera_top_t-6.jpg", + "images/task_00019_66_504_camera_top_t-3.jpg", + "images/task_00019_66_507_camera_top_t0.jpg", + "images/task_00019_66_510_camera_top_t+3.jpg", + "images/task_00019_66_513_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00019", + "split": "eval", + "split_group_id": "task_00019__episode_000066", + "recording_id": null, + "episode_id": 66, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. transfer\nC. approach\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. release" + } + ], + "images": [ + "images/task_00019_66_1251_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00019", + "split": "eval", + "split_group_id": "task_00019__episode_000066", + "recording_id": null, + "episode_id": 66, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. hold and carry\nC. contact\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
contact" + } + ], + "images": [ + "images/task_00019_66_484_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00019", + "split": "eval", + "split_group_id": "task_00019__episode_000066", + "recording_id": null, + "episode_id": 66, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_00019_66_308_camera_top.jpg", + "images/task_00019_66_129_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00019", + "split": "eval", + "split_group_id": "task_00019__episode_000066", + "recording_id": null, + "episode_id": 66, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. transfer\nC. pre-approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
approach" + } + ], + "images": [ + "images/task_00019_66_117_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00019", + "split": "eval", + "split_group_id": "task_00019__episode_000066", + "recording_id": null, + "episode_id": 66, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00019_74_1072_camera_top.jpg", + "images/task_00019_74_1075_camera_top.jpg", + "images/task_00019_74_1078_camera_top.jpg", + "images/task_00019_74_1081_camera_top.jpg", + "images/task_00019_74_1084_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00019", + "split": "eval", + "split_group_id": "task_00019__episode_000074", + "recording_id": null, + "episode_id": 74, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00019_74_337_camera_top.jpg", + "images/task_00019_74_340_camera_top.jpg", + "images/task_00019_74_343_camera_top.jpg", + "images/task_00019_74_346_camera_top.jpg", + "images/task_00019_74_349_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00019", + "split": "eval", + "split_group_id": "task_00019__episode_000074", + "recording_id": null, + "episode_id": 74, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00019_74_821_camera_top.jpg", + "images/task_00019_74_824_camera_top.jpg", + "images/task_00019_74_827_camera_top.jpg", + "images/task_00019_74_830_camera_top.jpg", + "images/task_00019_74_833_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00019", + "split": "eval", + "split_group_id": "task_00019__episode_000074", + "recording_id": null, + "episode_id": 74, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. transfer\nC. hold and carry\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
release" + } + ], + "images": [ + "images/task_00019_74_1304_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00019", + "split": "eval", + "split_group_id": "task_00019__episode_000074", + "recording_id": null, + "episode_id": 74, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_00019_74_1044_camera_top.jpg", + "images/task_00019_74_996_camera_top.jpg", + "images/task_00019_74_293_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00019", + "split": "eval", + "split_group_id": "task_00019__episode_000074", + "recording_id": null, + "episode_id": 74, + "camera": "camera_top", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/task_00019_74_1227_camera_top.jpg", + "images/task_00019_74_1293_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00019", + "split": "eval", + "split_group_id": "task_00019__episode_000074", + "recording_id": null, + "episode_id": 74, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. release\nC. transfer\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. hold and carry" + } + ], + "images": [ + "images/task_00019_74_382_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00019", + "split": "eval", + "split_group_id": "task_00019__episode_000074", + "recording_id": null, + "episode_id": 74, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00019_74_885_camera_top.jpg", + "images/task_00019_74_888_camera_top.jpg", + "images/task_00019_74_891_camera_top.jpg", + "images/task_00019_74_894_camera_top.jpg", + "images/task_00019_74_897_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00019", + "split": "eval", + "split_group_id": "task_00019__episode_000074", + "recording_id": null, + "episode_id": 74, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. hold and carry\nC. release\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. release" + } + ], + "images": [ + "images/task_00019_74_1322_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00019", + "split": "eval", + "split_group_id": "task_00019__episode_000074", + "recording_id": null, + "episode_id": 74, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00019_87_749_camera_top_t-6.jpg", + "images/task_00019_87_746_camera_top_t-3.jpg", + "images/task_00019_87_749_camera_top_t0.jpg", + "images/task_00019_87_752_camera_top_t+3.jpg", + "images/task_00019_87_755_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00019", + "split": "eval", + "split_group_id": "task_00019__episode_000087", + "recording_id": null, + "episode_id": 87, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_00019_87_493_camera_top.jpg", + "images/task_00019_87_822_camera_top.jpg", + "images/task_00019_87_276_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00019", + "split": "eval", + "split_group_id": "task_00019__episode_000087", + "recording_id": null, + "episode_id": 87, + "camera": "camera_top", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00019_87_749_camera_top_t-6.jpg", + "images/task_00019_87_752_camera_top_t-3.jpg", + "images/task_00019_87_755_camera_top_t0.jpg", + "images/task_00019_87_758_camera_top_t+3.jpg", + "images/task_00019_87_755_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00019", + "split": "eval", + "split_group_id": "task_00019__episode_000087", + "recording_id": null, + "episode_id": 87, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00019_87_566_camera_top.jpg", + "images/task_00019_87_569_camera_top.jpg", + "images/task_00019_87_572_camera_top.jpg", + "images/task_00019_87_575_camera_top.jpg", + "images/task_00019_87_578_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00019", + "split": "eval", + "split_group_id": "task_00019__episode_000087", + "recording_id": null, + "episode_id": 87, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nC. toward the left side of the scene\nA. toward the bottom of the scene\nB. toward the top of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
toward the left side of the scene" + } + ], + "images": [ + "images/task_00019_87_825_camera_top.jpg", + "images/task_00019_87_830_camera_top.jpg", + "images/task_00019_87_835_camera_top.jpg", + "images/task_00019_87_840_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00019", + "split": "eval", + "split_group_id": "task_00019__episode_000087", + "recording_id": null, + "episode_id": 87, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. release\nC. transfer\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. hold and carry" + } + ], + "images": [ + "images/task_00019_87_588_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00019", + "split": "eval", + "split_group_id": "task_00019__episode_000087", + "recording_id": null, + "episode_id": 87, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00019_87_145_camera_top_t-6.jpg", + "images/task_00019_87_148_camera_top_t-3.jpg", + "images/task_00019_87_151_camera_top_t0.jpg", + "images/task_00019_87_154_camera_top_t+3.jpg", + "images/task_00019_87_157_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00019", + "split": "eval", + "split_group_id": "task_00019__episode_000087", + "recording_id": null, + "episode_id": 87, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_00019_87_291_camera_top.jpg", + "images/task_00019_87_101_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00019", + "split": "eval", + "split_group_id": "task_00019__episode_000087", + "recording_id": null, + "episode_id": 87, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nA. toward the bottom of the scene\nB. toward the top of the scene\nC. toward the left side of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
toward the bottom of the scene" + } + ], + "images": [ + "images/task_00019_108_862_camera_top.jpg", + "images/task_00019_108_867_camera_top.jpg", + "images/task_00019_108_872_camera_top.jpg", + "images/task_00019_108_877_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00019", + "split": "eval", + "split_group_id": "task_00019__episode_000108", + "recording_id": null, + "episode_id": 108, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. pre-approach\nC. approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. release" + } + ], + "images": [ + "images/task_00019_108_1133_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00019", + "split": "eval", + "split_group_id": "task_00019__episode_000108", + "recording_id": null, + "episode_id": 108, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/task_00019_108_679_camera_top.jpg", + "images/task_00019_108_528_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00019", + "split": "eval", + "split_group_id": "task_00019__episode_000108", + "recording_id": null, + "episode_id": 108, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. pre-approach\nC. release\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. pre-approach" + } + ], + "images": [ + "images/task_00019_108_478_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00019", + "split": "eval", + "split_group_id": "task_00019__episode_000108", + "recording_id": null, + "episode_id": 108, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00019_108_681_camera_top.jpg", + "images/task_00019_108_684_camera_top.jpg", + "images/task_00019_108_687_camera_top.jpg", + "images/task_00019_108_690_camera_top.jpg", + "images/task_00019_108_693_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00019", + "split": "eval", + "split_group_id": "task_00019__episode_000108", + "recording_id": null, + "episode_id": 108, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_00019_108_1073_camera_top.jpg", + "images/task_00019_108_529_camera_top.jpg", + "images/task_00019_108_1267_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00019", + "split": "eval", + "split_group_id": "task_00019__episode_000108", + "recording_id": null, + "episode_id": 108, + "camera": "camera_top", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. approach\nC. pre-approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
transfer" + } + ], + "images": [ + "images/task_00019_108_910_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00019", + "split": "eval", + "split_group_id": "task_00019__episode_000108", + "recording_id": null, + "episode_id": 108, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00019_108_927_camera_top.jpg", + "images/task_00019_108_930_camera_top.jpg", + "images/task_00019_108_933_camera_top.jpg", + "images/task_00019_108_936_camera_top.jpg", + "images/task_00019_108_939_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00019", + "split": "eval", + "split_group_id": "task_00019__episode_000108", + "recording_id": null, + "episode_id": 108, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00019_108_779_camera_top.jpg", + "images/task_00019_108_782_camera_top.jpg", + "images/task_00019_108_785_camera_top.jpg", + "images/task_00019_108_788_camera_top.jpg", + "images/task_00019_108_791_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00019", + "split": "eval", + "split_group_id": "task_00019__episode_000108", + "recording_id": null, + "episode_id": 108, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_00019_108_678_camera_top.jpg", + "images/task_00019_108_556_camera_top.jpg", + "images/task_00019_108_1029_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00019", + "split": "eval", + "split_group_id": "task_00019__episode_000108", + "recording_id": null, + "episode_id": 108, + "camera": "camera_top", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_00019_108_1022_camera_top.jpg", + "images/task_00019_108_768_camera_top.jpg", + "images/task_00019_108_555_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00019", + "split": "eval", + "split_group_id": "task_00019__episode_000108", + "recording_id": null, + "episode_id": 108, + "camera": "camera_top", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nA. toward the top of the scene\nB. toward the right side of the scene\nC. toward the left side of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. toward the top of the scene" + } + ], + "images": [ + "images/task_00019_130_998_camera_top.jpg", + "images/task_00019_130_1003_camera_top.jpg", + "images/task_00019_130_1008_camera_top.jpg", + "images/task_00019_130_1013_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00019", + "split": "eval", + "split_group_id": "task_00019__episode_000130", + "recording_id": null, + "episode_id": 130, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_00020_113_1275_camera_top.jpg", + "images/task_00020_113_223_camera_top.jpg", + "images/task_00020_113_145_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00020", + "split": "eval", + "split_group_id": "task_00020__episode_000113", + "recording_id": null, + "episode_id": 113, + "camera": "camera_top", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_00020_115_145_camera_top.jpg", + "images/task_00020_115_457_camera_top.jpg", + "images/task_00020_115_372_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00020", + "split": "eval", + "split_group_id": "task_00020__episode_000115", + "recording_id": null, + "episode_id": 115, + "camera": "camera_top", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nC. toward the left side of the scene\nA. toward the top of the scene\nB. toward the bottom of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
toward the left side of the scene" + } + ], + "images": [ + "images/task_00021_0_1546_camera_top.jpg", + "images/task_00021_0_1551_camera_top.jpg", + "images/task_00021_0_1556_camera_top.jpg", + "images/task_00021_0_1561_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00021", + "split": "eval", + "split_group_id": "task_00021__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the left side of the scene\nB. toward the bottom of the scene\nC. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. toward the right side of the scene" + } + ], + "images": [ + "images/task_00021_67_2486_camera_top.jpg", + "images/task_00021_67_2491_camera_top.jpg", + "images/task_00021_67_2496_camera_top.jpg", + "images/task_00021_67_2501_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00021", + "split": "eval", + "split_group_id": "task_00021__episode_000067", + "recording_id": null, + "episode_id": 67, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. hold and carry\nC. transfer\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
hold and carry" + } + ], + "images": [ + "images/task_00022_11_900_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00022", + "split": "eval", + "split_group_id": "task_00022__episode_000011", + "recording_id": null, + "episode_id": 11, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00022_11_1267_camera_top.jpg", + "images/task_00022_11_1270_camera_top.jpg", + "images/task_00022_11_1273_camera_top.jpg", + "images/task_00022_11_1276_camera_top.jpg", + "images/task_00022_11_1279_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00022", + "split": "eval", + "split_group_id": "task_00022__episode_000011", + "recording_id": null, + "episode_id": 11, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00022_11_1315_camera_top_t-6.jpg", + "images/task_00022_11_1318_camera_top_t-3.jpg", + "images/task_00022_11_1321_camera_top_t0.jpg", + "images/task_00022_11_1324_camera_top_t+3.jpg", + "images/task_00022_11_1327_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00022", + "split": "eval", + "split_group_id": "task_00022__episode_000011", + "recording_id": null, + "episode_id": 11, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_00022_11_1107_camera_top.jpg", + "images/task_00022_11_398_camera_top.jpg", + "images/task_00022_11_2511_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00022", + "split": "eval", + "split_group_id": "task_00022__episode_000011", + "recording_id": null, + "episode_id": 11, + "camera": "camera_top", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nA. toward the left side of the scene\nB. toward the right side of the scene\nC. toward the bottom of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
toward the left side of the scene" + } + ], + "images": [ + "images/task_00022_11_713_camera_top.jpg", + "images/task_00022_11_718_camera_top.jpg", + "images/task_00022_11_723_camera_top.jpg", + "images/task_00022_11_728_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00022", + "split": "eval", + "split_group_id": "task_00022__episode_000011", + "recording_id": null, + "episode_id": 11, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nA. toward the right side of the scene\nB. toward the bottom of the scene\nC. toward the top of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. toward the right side of the scene" + } + ], + "images": [ + "images/task_00022_11_872_camera_top.jpg", + "images/task_00022_11_877_camera_top.jpg", + "images/task_00022_11_882_camera_top.jpg", + "images/task_00022_11_887_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00022", + "split": "eval", + "split_group_id": "task_00022__episode_000011", + "recording_id": null, + "episode_id": 11, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00022_11_412_camera_top.jpg", + "images/task_00022_11_415_camera_top.jpg", + "images/task_00022_11_418_camera_top.jpg", + "images/task_00022_11_421_camera_top.jpg", + "images/task_00022_11_424_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00022", + "split": "eval", + "split_group_id": "task_00022__episode_000011", + "recording_id": null, + "episode_id": 11, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the top of the scene\nB. toward the bottom of the scene\nC. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. toward the right side of the scene" + } + ], + "images": [ + "images/task_00022_11_815_camera_top.jpg", + "images/task_00022_11_820_camera_top.jpg", + "images/task_00022_11_825_camera_top.jpg", + "images/task_00022_11_830_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00022", + "split": "eval", + "split_group_id": "task_00022__episode_000011", + "recording_id": null, + "episode_id": 11, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00022_32_198_camera_top.jpg", + "images/task_00022_32_201_camera_top.jpg", + "images/task_00022_32_204_camera_top.jpg", + "images/task_00022_32_207_camera_top.jpg", + "images/task_00022_32_210_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00022", + "split": "eval", + "split_group_id": "task_00022__episode_000032", + "recording_id": null, + "episode_id": 32, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. pre-approach\nC. approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. pre-approach" + } + ], + "images": [ + "images/task_00022_32_88_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00022", + "split": "eval", + "split_group_id": "task_00022__episode_000032", + "recording_id": null, + "episode_id": 32, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00022_32_420_camera_top.jpg", + "images/task_00022_32_423_camera_top.jpg", + "images/task_00022_32_426_camera_top.jpg", + "images/task_00022_32_429_camera_top.jpg", + "images/task_00022_32_432_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00022", + "split": "eval", + "split_group_id": "task_00022__episode_000032", + "recording_id": null, + "episode_id": 32, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the left side of the scene\nA. toward the top of the scene\nB. toward the right side of the scene\nC. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. toward the left side of the scene" + } + ], + "images": [ + "images/task_00022_32_178_camera_top.jpg", + "images/task_00022_32_183_camera_top.jpg", + "images/task_00022_32_188_camera_top.jpg", + "images/task_00022_32_193_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00022", + "split": "eval", + "split_group_id": "task_00022__episode_000032", + "recording_id": null, + "episode_id": 32, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. pre-approach\nC. release\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
pre-approach" + } + ], + "images": [ + "images/task_00022_32_81_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00022", + "split": "eval", + "split_group_id": "task_00022__episode_000032", + "recording_id": null, + "episode_id": 32, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00022_32_378_camera_top_t-6.jpg", + "images/task_00022_32_381_camera_top_t-3.jpg", + "images/task_00022_32_384_camera_top_t0.jpg", + "images/task_00022_32_387_camera_top_t+3.jpg", + "images/task_00022_32_390_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00022", + "split": "eval", + "split_group_id": "task_00022__episode_000032", + "recording_id": null, + "episode_id": 32, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_00022_32_119_camera_top.jpg", + "images/task_00022_32_496_camera_top.jpg", + "images/task_00022_32_172_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00022", + "split": "eval", + "split_group_id": "task_00022__episode_000032", + "recording_id": null, + "episode_id": 32, + "camera": "camera_top", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. hold and carry\nC. pre-approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. transfer" + } + ], + "images": [ + "images/task_00022_41_168_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00022", + "split": "eval", + "split_group_id": "task_00022__episode_000041", + "recording_id": null, + "episode_id": 41, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_00022_41_198_camera_top.jpg", + "images/task_00022_41_106_camera_top.jpg", + "images/task_00022_41_429_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00022", + "split": "eval", + "split_group_id": "task_00022__episode_000041", + "recording_id": null, + "episode_id": 41, + "camera": "camera_top", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. 
Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00022_41_202_camera_top_t-6.jpg", + "images/task_00022_41_205_camera_top_t-3.jpg", + "images/task_00022_41_208_camera_top_t0.jpg", + "images/task_00022_41_211_camera_top_t+3.jpg", + "images/task_00022_41_214_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00022", + "split": "eval", + "split_group_id": "task_00022__episode_000041", + "recording_id": null, + "episode_id": 41, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_00022_41_276_camera_top.jpg", + "images/task_00022_41_91_camera_top.jpg", + "images/task_00022_41_582_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00022", + "split": "eval", + "split_group_id": "task_00022__episode_000041", + "recording_id": null, + "episode_id": 41, + "camera": "camera_top", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00022_41_178_camera_top.jpg", + "images/task_00022_41_181_camera_top.jpg", + "images/task_00022_41_184_camera_top.jpg", + "images/task_00022_41_187_camera_top.jpg", + "images/task_00022_41_190_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00022", + "split": "eval", + "split_group_id": "task_00022__episode_000041", + "recording_id": null, + "episode_id": 41, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. pre-approach\nC. release\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. hold and carry" + } + ], + "images": [ + "images/task_00022_41_416_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00022", + "split": "eval", + "split_group_id": "task_00022__episode_000041", + "recording_id": null, + "episode_id": 41, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nA. toward the left side of the scene\nB. toward the right side of the scene\nC. toward the top of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
toward the left side of the scene" + } + ], + "images": [ + "images/task_00022_82_794_camera_top.jpg", + "images/task_00022_82_799_camera_top.jpg", + "images/task_00022_82_804_camera_top.jpg", + "images/task_00022_82_809_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00022", + "split": "eval", + "split_group_id": "task_00022__episode_000082", + "recording_id": null, + "episode_id": 82, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00022_88_737_camera_top_t-6.jpg", + "images/task_00022_88_740_camera_top_t-3.jpg", + "images/task_00022_88_743_camera_top_t0.jpg", + "images/task_00022_88_746_camera_top_t+3.jpg", + "images/task_00022_88_749_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00022", + "split": "eval", + "split_group_id": "task_00022__episode_000088", + "recording_id": null, + "episode_id": 88, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_00022_88_743_camera_top.jpg", + "images/task_00022_88_591_camera_top.jpg", + "images/task_00022_88_936_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00022", + "split": "eval", + "split_group_id": "task_00022__episode_000088", + "recording_id": null, + "episode_id": 88, + "camera": "camera_top", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00022_88_488_camera_top.jpg", + "images/task_00022_88_491_camera_top.jpg", + "images/task_00022_88_494_camera_top.jpg", + "images/task_00022_88_497_camera_top.jpg", + "images/task_00022_88_500_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00022", + "split": "eval", + "split_group_id": "task_00022__episode_000088", + "recording_id": null, + "episode_id": 88, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nC. toward the left side of the scene\nA. toward the top of the scene\nB. toward the right side of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
toward the left side of the scene" + } + ], + "images": [ + "images/task_00022_88_684_camera_top.jpg", + "images/task_00022_88_689_camera_top.jpg", + "images/task_00022_88_694_camera_top.jpg", + "images/task_00022_88_699_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00022", + "split": "eval", + "split_group_id": "task_00022__episode_000088", + "recording_id": null, + "episode_id": 88, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. approach\nC. release\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. hold and carry" + } + ], + "images": [ + "images/task_00022_88_787_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00022", + "split": "eval", + "split_group_id": "task_00022__episode_000088", + "recording_id": null, + "episode_id": 88, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00022_88_393_camera_top_t-6.jpg", + "images/task_00022_88_396_camera_top_t-3.jpg", + "images/task_00022_88_399_camera_top_t0.jpg", + "images/task_00022_88_402_camera_top_t+3.jpg", + "images/task_00022_88_405_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00022", + "split": "eval", + "split_group_id": "task_00022__episode_000088", + "recording_id": null, + "episode_id": 88, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00022_127_281_camera_top.jpg", + "images/task_00022_127_284_camera_top.jpg", + "images/task_00022_127_287_camera_top.jpg", + "images/task_00022_127_290_camera_top.jpg", + "images/task_00022_127_293_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00022", + "split": "eval", + "split_group_id": "task_00022__episode_000127", + "recording_id": null, + "episode_id": 127, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. hold and carry\nC. contact\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
transfer" + } + ], + "images": [ + "images/task_00023_0_1021_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00023", + "split": "eval", + "split_group_id": "task_00023__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_00023_0_1658_camera_top.jpg", + "images/task_00023_0_1763_camera_top.jpg", + "images/task_00023_0_1524_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00023", + "split": "eval", + "split_group_id": "task_00023__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00023_0_1347_camera_top_t-6.jpg", + "images/task_00023_0_1350_camera_top_t-3.jpg", + "images/task_00023_0_1353_camera_top_t0.jpg", + "images/task_00023_0_1356_camera_top_t+3.jpg", + "images/task_00023_0_1359_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00023", + "split": "eval", + "split_group_id": "task_00023__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00023_0_1060_camera_top.jpg", + "images/task_00023_0_1063_camera_top.jpg", + "images/task_00023_0_1066_camera_top.jpg", + "images/task_00023_0_1069_camera_top.jpg", + "images/task_00023_0_1072_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00023", + "split": "eval", + "split_group_id": "task_00023__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nA. toward the left side of the scene\nB. toward the bottom of the scene\nC. toward the top of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
toward the left side of the scene" + } + ], + "images": [ + "images/task_00023_0_539_camera_top.jpg", + "images/task_00023_0_544_camera_top.jpg", + "images/task_00023_0_549_camera_top.jpg", + "images/task_00023_0_554_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00023", + "split": "eval", + "split_group_id": "task_00023__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_00023_0_1785_camera_top.jpg", + "images/task_00023_0_1644_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00023", + "split": "eval", + "split_group_id": "task_00023__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00023_0_1773_camera_top_t-6.jpg", + "images/task_00023_0_1776_camera_top_t-3.jpg", + "images/task_00023_0_1779_camera_top_t0.jpg", + "images/task_00023_0_1782_camera_top_t+3.jpg", + "images/task_00023_0_1785_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00023", + "split": "eval", + "split_group_id": "task_00023__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_00023_157_1002_camera_top.jpg", + "images/task_00023_157_414_camera_top.jpg", + "images/task_00023_157_375_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00023", + "split": "eval", + "split_group_id": "task_00023__episode_000157", + "recording_id": null, + "episode_id": 157, + "camera": "camera_top", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00023_157_1188_camera_top_t-6.jpg", + "images/task_00023_157_1191_camera_top_t-3.jpg", + "images/task_00023_157_1194_camera_top_t0.jpg", + "images/task_00023_157_1197_camera_top_t+3.jpg", + "images/task_00023_157_1200_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00023", + "split": "eval", + "split_group_id": "task_00023__episode_000157", + "recording_id": null, + "episode_id": 157, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. approach\nC. release\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. hold and carry" + } + ], + "images": [ + "images/task_00023_157_503_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00023", + "split": "eval", + "split_group_id": "task_00023__episode_000157", + "recording_id": null, + "episode_id": 157, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_00023_157_396_camera_top.jpg", + "images/task_00023_157_1117_camera_top.jpg", + "images/task_00023_157_956_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00023", + "split": "eval", + "split_group_id": "task_00023__episode_000157", + "recording_id": null, + "episode_id": 157, + "camera": "camera_top", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_00023_157_801_camera_top.jpg", + "images/task_00023_157_985_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00023", + "split": "eval", + "split_group_id": "task_00023__episode_000157", + "recording_id": null, + "episode_id": 157, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00023_157_531_camera_top.jpg", + "images/task_00023_157_534_camera_top.jpg", + "images/task_00023_157_537_camera_top.jpg", + "images/task_00023_157_540_camera_top.jpg", + "images/task_00023_157_543_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00023", + "split": "eval", + "split_group_id": "task_00023__episode_000157", + "recording_id": null, + "episode_id": 157, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00023_157_985_camera_top_t-6.jpg", + "images/task_00023_157_988_camera_top_t-3.jpg", + "images/task_00023_157_991_camera_top_t0.jpg", + "images/task_00023_157_994_camera_top_t+3.jpg", + "images/task_00023_157_997_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00023", + "split": "eval", + "split_group_id": "task_00023__episode_000157", + "recording_id": null, + "episode_id": 157, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nC. toward the right side of the scene\nA. toward the bottom of the scene\nB. toward the left side of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00023_157_1174_camera_top.jpg", + "images/task_00023_157_1179_camera_top.jpg", + "images/task_00023_157_1184_camera_top.jpg", + "images/task_00023_157_1189_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00023", + "split": "eval", + "split_group_id": "task_00023__episode_000157", + "recording_id": null, + "episode_id": 157, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_00023_157_593_camera_top.jpg", + "images/task_00023_157_357_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00023", + "split": "eval", + "split_group_id": "task_00023__episode_000157", + "recording_id": null, + "episode_id": 157, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. release\nC. pre-approach\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
release" + } + ], + "images": [ + "images/task_00024_53_2067_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00024", + "split": "eval", + "split_group_id": "task_00024__episode_000053", + "recording_id": null, + "episode_id": 53, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. release\nC. pre-approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. release" + } + ], + "images": [ + "images/task_00024_61_3325_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00024", + "split": "eval", + "split_group_id": "task_00024__episode_000061", + "recording_id": null, + "episode_id": 61, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. transfer\nC. approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. approach" + } + ], + "images": [ + "images/task_00024_61_449_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00024", + "split": "eval", + "split_group_id": "task_00024__episode_000061", + "recording_id": null, + "episode_id": 61, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00024_63_1182_camera_top.jpg", + "images/task_00024_63_1185_camera_top.jpg", + "images/task_00024_63_1188_camera_top.jpg", + "images/task_00024_63_1191_camera_top.jpg", + "images/task_00024_63_1194_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00024", + "split": "eval", + "split_group_id": "task_00024__episode_000063", + "recording_id": null, + "episode_id": 63, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nD. toward the left side of the scene\nA. toward the right side of the scene\nB. toward the top of the scene\nC. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. toward the left side of the scene" + } + ], + "images": [ + "images/task_00024_98_946_camera_top.jpg", + "images/task_00024_98_951_camera_top.jpg", + "images/task_00024_98_956_camera_top.jpg", + "images/task_00024_98_961_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00024", + "split": "eval", + "split_group_id": "task_00024__episode_000098", + "recording_id": null, + "episode_id": 98, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nC. toward the right side of the scene\nA. toward the left side of the scene\nB. toward the top of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "C. toward the right side of the scene" + } + ], + "images": [ + "images/task_00024_103_285_camera_top.jpg", + "images/task_00024_103_290_camera_top.jpg", + "images/task_00024_103_295_camera_top.jpg", + "images/task_00024_103_300_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00024", + "split": "eval", + "split_group_id": "task_00024__episode_000103", + "recording_id": null, + "episode_id": 103, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the top of the scene\nB. toward the bottom of the scene\nC. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. toward the right side of the scene" + } + ], + "images": [ + "images/task_00024_127_98_camera_top.jpg", + "images/task_00024_127_103_camera_top.jpg", + "images/task_00024_127_108_camera_top.jpg", + "images/task_00024_127_113_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00024", + "split": "eval", + "split_group_id": "task_00024__episode_000127", + "recording_id": null, + "episode_id": 127, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_00025_2_1246_camera_top.jpg", + "images/task_00025_2_912_camera_top.jpg", + "images/task_00025_2_1165_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00025", + "split": "eval", + "split_group_id": "task_00025__episode_000002", + "recording_id": null, + "episode_id": 2, + "camera": "camera_top", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nA. toward the right side of the scene\nB. toward the bottom of the scene\nC. toward the top of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. toward the right side of the scene" + } + ], + "images": [ + "images/task_00025_2_1572_camera_top.jpg", + "images/task_00025_2_1577_camera_top.jpg", + "images/task_00025_2_1582_camera_top.jpg", + "images/task_00025_2_1587_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00025", + "split": "eval", + "split_group_id": "task_00025__episode_000002", + "recording_id": null, + "episode_id": 2, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00025_2_1286_camera_top.jpg", + "images/task_00025_2_1289_camera_top.jpg", + "images/task_00025_2_1292_camera_top.jpg", + "images/task_00025_2_1295_camera_top.jpg", + "images/task_00025_2_1298_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00025", + "split": "eval", + "split_group_id": "task_00025__episode_000002", + "recording_id": null, + "episode_id": 2, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_00025_2_1164_camera_top.jpg", + "images/task_00025_2_1606_camera_top.jpg", + "images/task_00025_2_1003_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00025", + "split": "eval", + "split_group_id": "task_00025__episode_000002", + "recording_id": null, + "episode_id": 2, + "camera": "camera_top", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00025_2_1243_camera_top_t-6.jpg", + "images/task_00025_2_1246_camera_top_t-3.jpg", + "images/task_00025_2_1249_camera_top_t0.jpg", + "images/task_00025_2_1252_camera_top_t+3.jpg", + "images/task_00025_2_1255_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00025", + "split": "eval", + "split_group_id": "task_00025__episode_000002", + "recording_id": null, + "episode_id": 2, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. approach\nC. pre-approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. release" + } + ], + "images": [ + "images/task_00025_2_1715_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00025", + "split": "eval", + "split_group_id": "task_00025__episode_000002", + "recording_id": null, + "episode_id": 2, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_00025_12_453_camera_top.jpg", + "images/task_00025_12_137_camera_top.jpg", + "images/task_00025_12_780_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00025", + "split": "eval", + "split_group_id": "task_00025__episode_000012", + "recording_id": null, + "episode_id": 12, + "camera": "camera_top", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nD. toward the top of the scene\nA. toward the right side of the scene\nB. toward the left side of the scene\nC. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. toward the top of the scene" + } + ], + "images": [ + "images/task_00025_113_301_camera_top.jpg", + "images/task_00025_113_306_camera_top.jpg", + "images/task_00025_113_311_camera_top.jpg", + "images/task_00025_113_316_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00025", + "split": "eval", + "split_group_id": "task_00025__episode_000113", + "recording_id": null, + "episode_id": 113, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the left side of the scene\nA. toward the top of the scene\nB. toward the right side of the scene\nC. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
toward the left side of the scene" + } + ], + "images": [ + "images/task_00026_3_1705_camera_top.jpg", + "images/task_00026_3_1710_camera_top.jpg", + "images/task_00026_3_1715_camera_top.jpg", + "images/task_00026_3_1720_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00026", + "split": "eval", + "split_group_id": "task_00026__episode_000003", + "recording_id": null, + "episode_id": 3, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nB. toward the left side of the scene\nA. toward the right side of the scene\nC. toward the top of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. toward the left side of the scene" + } + ], + "images": [ + "images/task_00026_27_1523_camera_top.jpg", + "images/task_00026_27_1528_camera_top.jpg", + "images/task_00026_27_1533_camera_top.jpg", + "images/task_00026_27_1538_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00026", + "split": "eval", + "split_group_id": "task_00026__episode_000027", + "recording_id": null, + "episode_id": 27, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/task_00026_50_1658_camera_top.jpg", + "images/task_00026_50_1887_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00026", + "split": "eval", + "split_group_id": "task_00026__episode_000050", + "recording_id": null, + "episode_id": 50, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nB. toward the left side of the scene\nA. toward the bottom of the scene\nC. toward the right side of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. toward the left side of the scene" + } + ], + "images": [ + "images/task_00026_50_1903_camera_top.jpg", + "images/task_00026_50_1908_camera_top.jpg", + "images/task_00026_50_1913_camera_top.jpg", + "images/task_00026_50_1918_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00026", + "split": "eval", + "split_group_id": "task_00026__episode_000050", + "recording_id": null, + "episode_id": 50, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00026_50_425_camera_top.jpg", + "images/task_00026_50_428_camera_top.jpg", + "images/task_00026_50_431_camera_top.jpg", + "images/task_00026_50_434_camera_top.jpg", + "images/task_00026_50_437_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00026", + "split": "eval", + "split_group_id": "task_00026__episode_000050", + "recording_id": null, + "episode_id": 50, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the left side of the scene\nB. toward the top of the scene\nC. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. toward the right side of the scene" + } + ], + "images": [ + "images/task_00026_50_1237_camera_top.jpg", + "images/task_00026_50_1242_camera_top.jpg", + "images/task_00026_50_1247_camera_top.jpg", + "images/task_00026_50_1252_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00026", + "split": "eval", + "split_group_id": "task_00026__episode_000050", + "recording_id": null, + "episode_id": 50, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00026_50_807_camera_top_t-6.jpg", + "images/task_00026_50_810_camera_top_t-3.jpg", + "images/task_00026_50_813_camera_top_t0.jpg", + "images/task_00026_50_816_camera_top_t+3.jpg", + "images/task_00026_50_819_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00026", + "split": "eval", + "split_group_id": "task_00026__episode_000050", + "recording_id": null, + "episode_id": 50, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_00026_50_1203_camera_top.jpg", + "images/task_00026_50_1281_camera_top.jpg", + "images/task_00026_50_332_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00026", + "split": "eval", + "split_group_id": "task_00026__episode_000050", + "recording_id": null, + "episode_id": 50, + "camera": "camera_top", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_00026_50_1272_camera_top.jpg", + "images/task_00026_50_1552_camera_top.jpg", + "images/task_00026_50_1607_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00026", + "split": "eval", + "split_group_id": "task_00026__episode_000050", + "recording_id": null, + "episode_id": 50, + "camera": "camera_top", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the left side of the scene\nA. toward the bottom of the scene\nC. toward the top of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. toward the left side of the scene" + } + ], + "images": [ + "images/task_00026_88_1302_camera_top.jpg", + "images/task_00026_88_1307_camera_top.jpg", + "images/task_00026_88_1312_camera_top.jpg", + "images/task_00026_88_1317_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00026", + "split": "eval", + "split_group_id": "task_00026__episode_000088", + "recording_id": null, + "episode_id": 88, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nA. toward the left side of the scene\nB. toward the top of the scene\nC. toward the bottom of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
toward the left side of the scene" + } + ], + "images": [ + "images/task_00026_111_1285_camera_top.jpg", + "images/task_00026_111_1290_camera_top.jpg", + "images/task_00026_111_1295_camera_top.jpg", + "images/task_00026_111_1300_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00026", + "split": "eval", + "split_group_id": "task_00026__episode_000111", + "recording_id": null, + "episode_id": 111, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_00026_119_538_camera_top.jpg", + "images/task_00026_119_593_camera_top.jpg", + "images/task_00026_119_667_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00026", + "split": "eval", + "split_group_id": "task_00026__episode_000119", + "recording_id": null, + "episode_id": 119, + "camera": "camera_top", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. transfer\nC. approach\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. transfer" + } + ], + "images": [ + "images/task_00026_127_1792_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00026", + "split": "eval", + "split_group_id": "task_00026__episode_000127", + "recording_id": null, + "episode_id": 127, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. approach\nC. 
transfer\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. transfer" + } + ], + "images": [ + "images/task_00026_150_510_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00026", + "split": "eval", + "split_group_id": "task_00026__episode_000150", + "recording_id": null, + "episode_id": 150, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_00026_150_473_camera_top.jpg", + "images/task_00026_150_508_camera_top.jpg", + "images/task_00026_150_1018_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00026", + "split": "eval", + "split_group_id": "task_00026__episode_000150", + "recording_id": null, + "episode_id": 150, + "camera": "camera_top", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. approach\nC. contact\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. approach" + } + ], + "images": [ + "images/task_00026_150_484_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00026", + "split": "eval", + "split_group_id": "task_00026__episode_000150", + "recording_id": null, + "episode_id": 150, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. transfer\nC. approach\nD. release\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "D. release" + } + ], + "images": [ + "images/task_00026_150_780_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00026", + "split": "eval", + "split_group_id": "task_00026__episode_000150", + "recording_id": null, + "episode_id": 150, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. pre-approach\nC. release\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. contact" + } + ], + "images": [ + "images/task_00026_150_505_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00026", + "split": "eval", + "split_group_id": "task_00026__episode_000150", + "recording_id": null, + "episode_id": 150, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00026_185_371_camera_top_t-6.jpg", + "images/task_00026_185_374_camera_top_t-3.jpg", + "images/task_00026_185_377_camera_top_t0.jpg", + "images/task_00026_185_380_camera_top_t+3.jpg", + "images/task_00026_185_383_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00026", + "split": "eval", + "split_group_id": "task_00026__episode_000185", + "recording_id": null, + "episode_id": 185, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00026_185_1060_camera_top_t-6.jpg", + "images/task_00026_185_1063_camera_top_t-3.jpg", + "images/task_00026_185_1066_camera_top_t0.jpg", + "images/task_00026_185_1069_camera_top_t+3.jpg", + "images/task_00026_185_1072_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00026", + "split": "eval", + "split_group_id": "task_00026__episode_000185", + "recording_id": null, + "episode_id": 185, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00026_185_1428_camera_top.jpg", + "images/task_00026_185_1431_camera_top.jpg", + "images/task_00026_185_1434_camera_top.jpg", + "images/task_00026_185_1437_camera_top.jpg", + "images/task_00026_185_1440_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00026", + "split": "eval", + "split_group_id": "task_00026__episode_000185", + "recording_id": null, + "episode_id": 185, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nA. toward the right side of the scene\nB. toward the bottom of the scene\nC. toward the top of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. toward the right side of the scene" + } + ], + "images": [ + "images/task_00026_185_654_camera_top.jpg", + "images/task_00026_185_659_camera_top.jpg", + "images/task_00026_185_664_camera_top.jpg", + "images/task_00026_185_669_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00026", + "split": "eval", + "split_group_id": "task_00026__episode_000185", + "recording_id": null, + "episode_id": 185, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00026_185_298_camera_top.jpg", + "images/task_00026_185_301_camera_top.jpg", + "images/task_00026_185_304_camera_top.jpg", + "images/task_00026_185_307_camera_top.jpg", + "images/task_00026_185_310_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00026", + "split": "eval", + "split_group_id": "task_00026__episode_000185", + "recording_id": null, + "episode_id": 185, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_00026_185_1097_camera_top.jpg", + "images/task_00026_185_798_camera_top.jpg", + "images/task_00026_185_857_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00026", + "split": "eval", + "split_group_id": "task_00026__episode_000185", + "recording_id": null, + "episode_id": 185, + "camera": "camera_top", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — contact established" + } + ], + "images": [ + "images/task_00027_18_907_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00027", + "split": "eval", + "split_group_id": "task_00027__episode_000018", + "recording_id": null, + "episode_id": 18, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/task_00027_18_629_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00027", + "split": "eval", + "split_group_id": "task_00027__episode_000018", + "recording_id": null, + "episode_id": 18, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00027_18_469_camera_top_t-6.jpg", + "images/task_00027_18_472_camera_top_t-3.jpg", + "images/task_00027_18_475_camera_top_t0.jpg", + "images/task_00027_18_478_camera_top_t+3.jpg", + "images/task_00027_18_481_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00027", + "split": "eval", + "split_group_id": "task_00027__episode_000018", + "recording_id": null, + "episode_id": 18, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00027_18_1622_camera_top_t-6.jpg", + "images/task_00027_18_1625_camera_top_t-3.jpg", + "images/task_00027_18_1628_camera_top_t0.jpg", + "images/task_00027_18_1631_camera_top_t+3.jpg", + "images/task_00027_18_1634_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00027", + "split": "eval", + "split_group_id": "task_00027__episode_000018", + "recording_id": null, + "episode_id": 18, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_00027_78_534_camera_top.jpg", + "images/task_00027_78_1264_camera_top.jpg", + "images/task_00027_78_358_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00027", + "split": "eval", + "split_group_id": "task_00027__episode_000078", + "recording_id": null, + "episode_id": 78, + "camera": "camera_top", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. release\nC. pre-approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. pre-approach" + } + ], + "images": [ + "images/task_00027_97_229_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00027", + "split": "eval", + "split_group_id": "task_00027__episode_000097", + "recording_id": null, + "episode_id": 97, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nB. toward the bottom of the scene\nA. toward the right side of the scene\nC. toward the top of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
toward the bottom of the scene" + } + ], + "images": [ + "images/task_00027_100_367_camera_top.jpg", + "images/task_00027_100_372_camera_top.jpg", + "images/task_00027_100_377_camera_top.jpg", + "images/task_00027_100_382_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00027", + "split": "eval", + "split_group_id": "task_00027__episode_000100", + "recording_id": null, + "episode_id": 100, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. contact\nC. pre-approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. contact" + } + ], + "images": [ + "images/task_00027_121_246_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00027", + "split": "eval", + "split_group_id": "task_00027__episode_000121", + "recording_id": null, + "episode_id": 121, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. transfer\nC. hold and carry\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. transfer" + } + ], + "images": [ + "images/task_00028_2_663_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00028", + "split": "eval", + "split_group_id": "task_00028__episode_000002", + "recording_id": null, + "episode_id": 2, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. pre-approach\nC. release\nD. 
hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. pre-approach" + } + ], + "images": [ + "images/task_00028_7_196_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00028", + "split": "eval", + "split_group_id": "task_00028__episode_000007", + "recording_id": null, + "episode_id": 7, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_00028_10_699_camera_top.jpg", + "images/task_00028_10_193_camera_top.jpg", + "images/task_00028_10_253_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00028", + "split": "eval", + "split_group_id": "task_00028__episode_000010", + "recording_id": null, + "episode_id": 10, + "camera": "camera_top", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_00028_65_262_camera_top.jpg", + "images/task_00028_65_715_camera_top.jpg", + "images/task_00028_65_864_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00028", + "split": "eval", + "split_group_id": "task_00028__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. 
Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00028_66_762_camera_top.jpg", + "images/task_00028_66_765_camera_top.jpg", + "images/task_00028_66_768_camera_top.jpg", + "images/task_00028_66_771_camera_top.jpg", + "images/task_00028_66_774_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00028", + "split": "eval", + "split_group_id": "task_00028__episode_000066", + "recording_id": null, + "episode_id": 66, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. transfer\nC. release\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. release" + } + ], + "images": [ + "images/task_00028_100_1010_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00028", + "split": "eval", + "split_group_id": "task_00028__episode_000100", + "recording_id": null, + "episode_id": 100, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nA. toward the right side of the scene\nB. toward the left side of the scene\nC. toward the top of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00028_124_699_camera_top.jpg", + "images/task_00028_124_704_camera_top.jpg", + "images/task_00028_124_709_camera_top.jpg", + "images/task_00028_124_714_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00028", + "split": "eval", + "split_group_id": "task_00028__episode_000124", + "recording_id": null, + "episode_id": 124, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. only the right arm is active" + } + ], + "images": [ + "images/task_00029_13_334_camera_top.jpg", + "images/task_00029_13_337_camera_top.jpg", + "images/task_00029_13_340_camera_top.jpg", + "images/task_00029_13_343_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00029", + "split": "eval", + "split_group_id": "task_00029__episode_000013", + "recording_id": null, + "episode_id": 13, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/task_00029_13_294_camera_top.jpg", + "images/task_00029_13_222_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00029", + "split": "eval", + "split_group_id": "task_00029__episode_000013", + "recording_id": null, + "episode_id": 13, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. both arms are idle" + } + ], + "images": [ + "images/task_00029_13_270_camera_top.jpg", + "images/task_00029_13_273_camera_top.jpg", + "images/task_00029_13_276_camera_top.jpg", + "images/task_00029_13_279_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00029", + "split": "eval", + "split_group_id": "task_00029__episode_000013", + "recording_id": null, + "episode_id": 13, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. hold and carry\nC. approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
release" + } + ], + "images": [ + "images/task_00029_13_1252_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00029", + "split": "eval", + "split_group_id": "task_00029__episode_000013", + "recording_id": null, + "episode_id": 13, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. both arms are active" + } + ], + "images": [ + "images/task_00029_13_117_camera_top.jpg", + "images/task_00029_13_120_camera_top.jpg", + "images/task_00029_13_123_camera_top.jpg", + "images/task_00029_13_126_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00029", + "split": "eval", + "split_group_id": "task_00029__episode_000013", + "recording_id": null, + "episode_id": 13, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
only the right arm is active" + } + ], + "images": [ + "images/task_00029_13_538_camera_top.jpg", + "images/task_00029_13_541_camera_top.jpg", + "images/task_00029_13_544_camera_top.jpg", + "images/task_00029_13_547_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00029", + "split": "eval", + "split_group_id": "task_00029__episode_000013", + "recording_id": null, + "episode_id": 13, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. only the left arm is active" + } + ], + "images": [ + "images/task_00029_13_815_camera_top.jpg", + "images/task_00029_13_818_camera_top.jpg", + "images/task_00029_13_821_camera_top.jpg", + "images/task_00029_13_824_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00029", + "split": "eval", + "split_group_id": "task_00029__episode_000013", + "recording_id": null, + "episode_id": 13, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
both arms are active" + } + ], + "images": [ + "images/task_00029_13_179_camera_top.jpg", + "images/task_00029_13_182_camera_top.jpg", + "images/task_00029_13_185_camera_top.jpg", + "images/task_00029_13_188_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00029", + "split": "eval", + "split_group_id": "task_00029__episode_000013", + "recording_id": null, + "episode_id": 13, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00029_84_906_camera_top_t-6.jpg", + "images/task_00029_84_909_camera_top_t-3.jpg", + "images/task_00029_84_912_camera_top_t0.jpg", + "images/task_00029_84_915_camera_top_t+3.jpg", + "images/task_00029_84_918_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00029", + "split": "eval", + "split_group_id": "task_00029__episode_000084", + "recording_id": null, + "episode_id": 84, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00029_84_411_camera_top.jpg", + "images/task_00029_84_414_camera_top.jpg", + "images/task_00029_84_417_camera_top.jpg", + "images/task_00029_84_420_camera_top.jpg", + "images/task_00029_84_423_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00029", + "split": "eval", + "split_group_id": "task_00029__episode_000084", + "recording_id": null, + "episode_id": 84, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the right side of the scene\nA. toward the top of the scene\nC. toward the bottom of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. toward the right side of the scene" + } + ], + "images": [ + "images/task_00029_84_888_camera_top.jpg", + "images/task_00029_84_893_camera_top.jpg", + "images/task_00029_84_898_camera_top.jpg", + "images/task_00029_84_903_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00029", + "split": "eval", + "split_group_id": "task_00029__episode_000084", + "recording_id": null, + "episode_id": 84, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_00029_84_243_camera_top.jpg", + "images/task_00029_84_350_camera_top.jpg", + "images/task_00029_84_913_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00029", + "split": "eval", + "split_group_id": "task_00029__episode_000084", + "recording_id": null, + "episode_id": 84, + "camera": "camera_top", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_00029_84_399_camera_top.jpg", + "images/task_00029_84_873_camera_top.jpg", + "images/task_00029_84_1029_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00029", + "split": "eval", + "split_group_id": "task_00029__episode_000084", + "recording_id": null, + "episode_id": 84, + "camera": "camera_top", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nC. toward the left side of the scene\nA. toward the right side of the scene\nB. toward the top of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
toward the left side of the scene" + } + ], + "images": [ + "images/task_00029_84_658_camera_top.jpg", + "images/task_00029_84_663_camera_top.jpg", + "images/task_00029_84_668_camera_top.jpg", + "images/task_00029_84_673_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00029", + "split": "eval", + "split_group_id": "task_00029__episode_000084", + "recording_id": null, + "episode_id": 84, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00029_84_891_camera_top_t-6.jpg", + "images/task_00029_84_894_camera_top_t-3.jpg", + "images/task_00029_84_897_camera_top_t0.jpg", + "images/task_00029_84_900_camera_top_t+3.jpg", + "images/task_00029_84_903_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00029", + "split": "eval", + "split_group_id": "task_00029__episode_000084", + "recording_id": null, + "episode_id": 84, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. approach\nC. contact\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
contact" + } + ], + "images": [ + "images/task_00029_208_131_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00029", + "split": "eval", + "split_group_id": "task_00029__episode_000208", + "recording_id": null, + "episode_id": 208, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_00030_19_223_camera_top.jpg", + "images/task_00030_19_729_camera_top.jpg", + "images/task_00030_19_124_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00030", + "split": "eval", + "split_group_id": "task_00030__episode_000019", + "recording_id": null, + "episode_id": 19, + "camera": "camera_top", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00030_19_391_camera_top.jpg", + "images/task_00030_19_394_camera_top.jpg", + "images/task_00030_19_397_camera_top.jpg", + "images/task_00030_19_400_camera_top.jpg", + "images/task_00030_19_403_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00030", + "split": "eval", + "split_group_id": "task_00030__episode_000019", + "recording_id": null, + "episode_id": 19, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_00030_19_135_camera_top.jpg", + "images/task_00030_19_174_camera_top.jpg", + "images/task_00030_19_481_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00030", + "split": "eval", + "split_group_id": "task_00030__episode_000019", + "recording_id": null, + "episode_id": 19, + "camera": "camera_top", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00030_19_122_camera_top_t-6.jpg", + "images/task_00030_19_125_camera_top_t-3.jpg", + "images/task_00030_19_128_camera_top_t0.jpg", + "images/task_00030_19_131_camera_top_t+3.jpg", + "images/task_00030_19_134_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00030", + "split": "eval", + "split_group_id": "task_00030__episode_000019", + "recording_id": null, + "episode_id": 19, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00030_19_633_camera_top_t-6.jpg", + "images/task_00030_19_636_camera_top_t-3.jpg", + "images/task_00030_19_639_camera_top_t0.jpg", + "images/task_00030_19_642_camera_top_t+3.jpg", + "images/task_00030_19_645_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00030", + "split": "eval", + "split_group_id": "task_00030__episode_000019", + "recording_id": null, + "episode_id": 19, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00030_19_506_camera_top_t-6.jpg", + "images/task_00030_19_509_camera_top_t-3.jpg", + "images/task_00030_19_512_camera_top_t0.jpg", + "images/task_00030_19_515_camera_top_t+3.jpg", + "images/task_00030_19_518_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00030", + "split": "eval", + "split_group_id": "task_00030__episode_000019", + "recording_id": null, + "episode_id": 19, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. contact\nC. pre-approach\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. contact" + } + ], + "images": [ + "images/task_00030_19_133_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00030", + "split": "eval", + "split_group_id": "task_00030__episode_000019", + "recording_id": null, + "episode_id": 19, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00030_92_242_camera_top.jpg", + "images/task_00030_92_245_camera_top.jpg", + "images/task_00030_92_248_camera_top.jpg", + "images/task_00030_92_251_camera_top.jpg", + "images/task_00030_92_254_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00030", + "split": "eval", + "split_group_id": "task_00030__episode_000092", + "recording_id": null, + "episode_id": 92, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. pre-approach\nC. release\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. hold and carry" + } + ], + "images": [ + "images/task_00030_111_115_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00030", + "split": "eval", + "split_group_id": "task_00030__episode_000111", + "recording_id": null, + "episode_id": 111, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. contact\nC. transfer\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
transfer" + } + ], + "images": [ + "images/task_00030_124_190_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00030", + "split": "eval", + "split_group_id": "task_00030__episode_000124", + "recording_id": null, + "episode_id": 124, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. pre-approach\nC. contact\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. contact" + } + ], + "images": [ + "images/task_00030_124_102_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00030", + "split": "eval", + "split_group_id": "task_00030__episode_000124", + "recording_id": null, + "episode_id": 124, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. contact\nC. pre-approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. transfer" + } + ], + "images": [ + "images/task_00030_124_489_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00030", + "split": "eval", + "split_group_id": "task_00030__episode_000124", + "recording_id": null, + "episode_id": 124, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00030_157_712_camera_top_t-6.jpg", + "images/task_00030_157_715_camera_top_t-3.jpg", + "images/task_00030_157_718_camera_top_t0.jpg", + "images/task_00030_157_721_camera_top_t+3.jpg", + "images/task_00030_157_724_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00030", + "split": "eval", + "split_group_id": "task_00030__episode_000157", + "recording_id": null, + "episode_id": 157, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. transfer\nC. release\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. release" + } + ], + "images": [ + "images/task_00030_157_821_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00030", + "split": "eval", + "split_group_id": "task_00030__episode_000157", + "recording_id": null, + "episode_id": 157, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the top of the scene\nB. toward the left side of the scene\nC. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00030_157_269_camera_top.jpg", + "images/task_00030_157_274_camera_top.jpg", + "images/task_00030_157_279_camera_top.jpg", + "images/task_00030_157_284_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00030", + "split": "eval", + "split_group_id": "task_00030__episode_000157", + "recording_id": null, + "episode_id": 157, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_00030_157_193_camera_top.jpg", + "images/task_00030_157_151_camera_top.jpg", + "images/task_00030_157_503_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00030", + "split": "eval", + "split_group_id": "task_00030__episode_000157", + "recording_id": null, + "episode_id": 157, + "camera": "camera_top", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. transfer\nC. approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. pre-approach" + } + ], + "images": [ + "images/task_00030_157_114_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00030", + "split": "eval", + "split_group_id": "task_00030__episode_000157", + "recording_id": null, + "episode_id": 157, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_00030_157_889_camera_top.jpg", + "images/task_00030_157_160_camera_top.jpg", + "images/task_00030_157_210_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00030", + "split": "eval", + "split_group_id": "task_00030__episode_000157", + "recording_id": null, + "episode_id": 157, + "camera": "camera_top", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. release\nC. approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. release" + } + ], + "images": [ + "images/task_00030_161_835_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00030", + "split": "eval", + "split_group_id": "task_00030__episode_000161", + "recording_id": null, + "episode_id": 161, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the left side of the scene\nB. toward the bottom of the scene\nC. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00031_31_196_camera_top.jpg", + "images/task_00031_31_201_camera_top.jpg", + "images/task_00031_31_206_camera_top.jpg", + "images/task_00031_31_211_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00031", + "split": "eval", + "split_group_id": "task_00031__episode_000031", + "recording_id": null, + "episode_id": 31, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00031_47_534_camera_top.jpg", + "images/task_00031_47_537_camera_top.jpg", + "images/task_00031_47_540_camera_top.jpg", + "images/task_00031_47_543_camera_top.jpg", + "images/task_00031_47_546_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00031", + "split": "eval", + "split_group_id": "task_00031__episode_000047", + "recording_id": null, + "episode_id": 47, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00031_49_434_camera_top.jpg", + "images/task_00031_49_437_camera_top.jpg", + "images/task_00031_49_440_camera_top.jpg", + "images/task_00031_49_443_camera_top.jpg", + "images/task_00031_49_446_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00031", + "split": "eval", + "split_group_id": "task_00031__episode_000049", + "recording_id": null, + "episode_id": 49, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. transfer\nC. contact\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. transfer" + } + ], + "images": [ + "images/task_00031_54_316_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00031", + "split": "eval", + "split_group_id": "task_00031__episode_000054", + "recording_id": null, + "episode_id": 54, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. hold and carry\nC. release\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
hold and carry" + } + ], + "images": [ + "images/task_00031_88_151_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00031", + "split": "eval", + "split_group_id": "task_00031__episode_000088", + "recording_id": null, + "episode_id": 88, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nA. toward the left side of the scene\nB. toward the top of the scene\nC. toward the bottom of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. toward the left side of the scene" + } + ], + "images": [ + "images/task_00031_91_97_camera_top.jpg", + "images/task_00031_91_102_camera_top.jpg", + "images/task_00031_91_107_camera_top.jpg", + "images/task_00031_91_112_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00031", + "split": "eval", + "split_group_id": "task_00031__episode_000091", + "recording_id": null, + "episode_id": 91, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00031_99_271_camera_top.jpg", + "images/task_00031_99_274_camera_top.jpg", + "images/task_00031_99_277_camera_top.jpg", + "images/task_00031_99_280_camera_top.jpg", + "images/task_00031_99_283_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00031", + "split": "eval", + "split_group_id": "task_00031__episode_000099", + "recording_id": null, + "episode_id": 99, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nA. toward the left side of the scene\nB. toward the bottom of the scene\nC. toward the top of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. toward the left side of the scene" + } + ], + "images": [ + "images/task_00031_120_260_camera_top.jpg", + "images/task_00031_120_265_camera_top.jpg", + "images/task_00031_120_270_camera_top.jpg", + "images/task_00031_120_275_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00031", + "split": "eval", + "split_group_id": "task_00031__episode_000120", + "recording_id": null, + "episode_id": 120, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. pre-approach\nC. transfer\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
transfer" + } + ], + "images": [ + "images/task_00031_120_299_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00031", + "split": "eval", + "split_group_id": "task_00031__episode_000120", + "recording_id": null, + "episode_id": 120, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_00031_120_355_camera_top.jpg", + "images/task_00031_120_405_camera_top.jpg", + "images/task_00031_120_136_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00031", + "split": "eval", + "split_group_id": "task_00031__episode_000120", + "recording_id": null, + "episode_id": 120, + "camera": "camera_top", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. contact\nC. transfer\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. transfer" + } + ], + "images": [ + "images/task_00031_120_308_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00031", + "split": "eval", + "split_group_id": "task_00031__episode_000120", + "recording_id": null, + "episode_id": 120, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00031_120_240_camera_top.jpg", + "images/task_00031_120_243_camera_top.jpg", + "images/task_00031_120_246_camera_top.jpg", + "images/task_00031_120_249_camera_top.jpg", + "images/task_00031_120_252_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00031", + "split": "eval", + "split_group_id": "task_00031__episode_000120", + "recording_id": null, + "episode_id": 120, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the left side of the scene\nA. toward the top of the scene\nB. toward the bottom of the scene\nC. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. toward the left side of the scene" + } + ], + "images": [ + "images/task_00031_120_290_camera_top.jpg", + "images/task_00031_120_295_camera_top.jpg", + "images/task_00031_120_300_camera_top.jpg", + "images/task_00031_120_305_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00031", + "split": "eval", + "split_group_id": "task_00031__episode_000120", + "recording_id": null, + "episode_id": 120, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00031_120_188_camera_top.jpg", + "images/task_00031_120_191_camera_top.jpg", + "images/task_00031_120_194_camera_top.jpg", + "images/task_00031_120_197_camera_top.jpg", + "images/task_00031_120_200_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00031", + "split": "eval", + "split_group_id": "task_00031__episode_000120", + "recording_id": null, + "episode_id": 120, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. transfer\nC. approach\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. transfer" + } + ], + "images": [ + "images/task_00032_21_105_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00032", + "split": "eval", + "split_group_id": "task_00032__episode_000021", + "recording_id": null, + "episode_id": 21, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00032_74_1533_camera_top.jpg", + "images/task_00032_74_1536_camera_top.jpg", + "images/task_00032_74_1539_camera_top.jpg", + "images/task_00032_74_1542_camera_top.jpg", + "images/task_00032_74_1545_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00032", + "split": "eval", + "split_group_id": "task_00032__episode_000074", + "recording_id": null, + "episode_id": 74, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the left side of the scene\nB. toward the bottom of the scene\nC. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. toward the right side of the scene" + } + ], + "images": [ + "images/task_00032_82_769_camera_top.jpg", + "images/task_00032_82_774_camera_top.jpg", + "images/task_00032_82_779_camera_top.jpg", + "images/task_00032_82_784_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00032", + "split": "eval", + "split_group_id": "task_00032__episode_000082", + "recording_id": null, + "episode_id": 82, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nC. toward the right side of the scene\nA. toward the top of the scene\nB. toward the bottom of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00032_84_2681_camera_top.jpg", + "images/task_00032_84_2686_camera_top.jpg", + "images/task_00032_84_2691_camera_top.jpg", + "images/task_00032_84_2696_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00032", + "split": "eval", + "split_group_id": "task_00032__episode_000084", + "recording_id": null, + "episode_id": 84, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. pre-approach\nC. approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. release" + } + ], + "images": [ + "images/task_00032_114_2326_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00032", + "split": "eval", + "split_group_id": "task_00032__episode_000114", + "recording_id": null, + "episode_id": 114, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. release\nC. pre-approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. transfer" + } + ], + "images": [ + "images/task_00032_130_3790_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00032", + "split": "eval", + "split_group_id": "task_00032__episode_000130", + "recording_id": null, + "episode_id": 130, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nA. toward the right side of the scene\nB. 
toward the left side of the scene\nC. toward the bottom of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. toward the right side of the scene" + } + ], + "images": [ + "images/task_00033_2_356_camera_top.jpg", + "images/task_00033_2_361_camera_top.jpg", + "images/task_00033_2_366_camera_top.jpg", + "images/task_00033_2_371_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00033", + "split": "eval", + "split_group_id": "task_00033__episode_000002", + "recording_id": null, + "episode_id": 2, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00033_57_192_camera_top.jpg", + "images/task_00033_57_195_camera_top.jpg", + "images/task_00033_57_198_camera_top.jpg", + "images/task_00033_57_201_camera_top.jpg", + "images/task_00033_57_204_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00033", + "split": "eval", + "split_group_id": "task_00033__episode_000057", + "recording_id": null, + "episode_id": 57, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the right side of the scene\nA. toward the bottom of the scene\nC. toward the left side of the scene\nD. 
toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. toward the right side of the scene" + } + ], + "images": [ + "images/task_00033_60_220_camera_top.jpg", + "images/task_00033_60_225_camera_top.jpg", + "images/task_00033_60_230_camera_top.jpg", + "images/task_00033_60_235_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00033", + "split": "eval", + "split_group_id": "task_00033__episode_000060", + "recording_id": null, + "episode_id": 60, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. approach\nC. contact\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. approach" + } + ], + "images": [ + "images/task_00033_63_98_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00033", + "split": "eval", + "split_group_id": "task_00033__episode_000063", + "recording_id": null, + "episode_id": 63, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00033_81_219_camera_top.jpg", + "images/task_00033_81_222_camera_top.jpg", + "images/task_00033_81_225_camera_top.jpg", + "images/task_00033_81_228_camera_top.jpg", + "images/task_00033_81_231_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00033", + "split": "eval", + "split_group_id": "task_00033__episode_000081", + "recording_id": null, + "episode_id": 81, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nC. toward the right side of the scene\nA. toward the top of the scene\nB. toward the bottom of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. toward the right side of the scene" + } + ], + "images": [ + "images/task_00034_31_789_camera_top.jpg", + "images/task_00034_31_794_camera_top.jpg", + "images/task_00034_31_799_camera_top.jpg", + "images/task_00034_31_804_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00034", + "split": "eval", + "split_group_id": "task_00034__episode_000031", + "recording_id": null, + "episode_id": 31, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00034_79_529_camera_top.jpg", + "images/task_00034_79_532_camera_top.jpg", + "images/task_00034_79_535_camera_top.jpg", + "images/task_00034_79_538_camera_top.jpg", + "images/task_00034_79_541_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00034", + "split": "eval", + "split_group_id": "task_00034__episode_000079", + "recording_id": null, + "episode_id": 79, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. hold and carry\nC. approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. approach" + } + ], + "images": [ + "images/task_00034_82_123_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00034", + "split": "eval", + "split_group_id": "task_00034__episode_000082", + "recording_id": null, + "episode_id": 82, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the left gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — contact established" + } + ], + "images": [ + "images/task_00035_50_533_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00035", + "split": "eval", + "split_group_id": "task_00035__episode_000050", + "recording_id": null, + "episode_id": 50, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_00035_50_820_camera_top.jpg", + "images/task_00035_50_1189_camera_top.jpg", + "images/task_00035_50_413_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00035", + "split": "eval", + "split_group_id": "task_00035__episode_000050", + "recording_id": null, + "episode_id": 50, + "camera": "camera_top", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. contact\nC. transfer\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. hold and carry" + } + ], + "images": [ + "images/task_00035_50_648_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00035", + "split": "eval", + "split_group_id": "task_00035__episode_000050", + "recording_id": null, + "episode_id": 50, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00035_50_56_camera_top.jpg", + "images/task_00035_50_59_camera_top.jpg", + "images/task_00035_50_62_camera_top.jpg", + "images/task_00035_50_65_camera_top.jpg", + "images/task_00035_50_68_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00035", + "split": "eval", + "split_group_id": "task_00035__episode_000050", + "recording_id": null, + "episode_id": 50, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. only the right arm is active" + } + ], + "images": [ + "images/task_00035_50_927_camera_top.jpg", + "images/task_00035_50_930_camera_top.jpg", + "images/task_00035_50_933_camera_top.jpg", + "images/task_00035_50_936_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00035", + "split": "eval", + "split_group_id": "task_00035__episode_000050", + "recording_id": null, + "episode_id": 50, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_00035_50_439_camera_top.jpg", + "images/task_00035_50_395_camera_top.jpg", + "images/task_00035_50_317_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00035", + "split": "eval", + "split_group_id": "task_00035__episode_000050", + "recording_id": null, + "episode_id": 50, + "camera": "camera_top", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. both arms are active" + } + ], + "images": [ + "images/task_00035_50_1097_camera_top.jpg", + "images/task_00035_50_1100_camera_top.jpg", + "images/task_00035_50_1103_camera_top.jpg", + "images/task_00035_50_1106_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00035", + "split": "eval", + "split_group_id": "task_00035__episode_000050", + "recording_id": null, + "episode_id": 50, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
both arms are idle" + } + ], + "images": [ + "images/task_00035_50_1211_camera_top.jpg", + "images/task_00035_50_1214_camera_top.jpg", + "images/task_00035_50_1217_camera_top.jpg", + "images/task_00035_50_1220_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00035", + "split": "eval", + "split_group_id": "task_00035__episode_000050", + "recording_id": null, + "episode_id": 50, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_00035_50_301_camera_top.jpg", + "images/task_00035_50_444_camera_top.jpg", + "images/task_00035_50_907_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00035", + "split": "eval", + "split_group_id": "task_00035__episode_000050", + "recording_id": null, + "episode_id": 50, + "camera": "camera_top", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
only the left arm is active" + } + ], + "images": [ + "images/task_00035_102_2607_camera_top.jpg", + "images/task_00035_102_2610_camera_top.jpg", + "images/task_00035_102_2613_camera_top.jpg", + "images/task_00035_102_2616_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00035", + "split": "eval", + "split_group_id": "task_00035__episode_000102", + "recording_id": null, + "episode_id": 102, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00035_102_2520_camera_top_t-6.jpg", + "images/task_00035_102_2523_camera_top_t-3.jpg", + "images/task_00035_102_2526_camera_top_t0.jpg", + "images/task_00035_102_2529_camera_top_t+3.jpg", + "images/task_00035_102_2532_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00035", + "split": "eval", + "split_group_id": "task_00035__episode_000102", + "recording_id": null, + "episode_id": 102, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. contact\nC. hold and carry\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
hold and carry" + } + ], + "images": [ + "images/task_00035_102_1698_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00035", + "split": "eval", + "split_group_id": "task_00035__episode_000102", + "recording_id": null, + "episode_id": 102, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. pre-approach\nC. hold and carry\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. pre-approach" + } + ], + "images": [ + "images/task_00035_102_420_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00035", + "split": "eval", + "split_group_id": "task_00035__episode_000102", + "recording_id": null, + "episode_id": 102, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the left gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/task_00035_102_535_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00035", + "split": "eval", + "split_group_id": "task_00035__episode_000102", + "recording_id": null, + "episode_id": 102, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "C. only the right arm is active" + } + ], + "images": [ + "images/task_00035_102_284_camera_top.jpg", + "images/task_00035_102_287_camera_top.jpg", + "images/task_00035_102_290_camera_top.jpg", + "images/task_00035_102_293_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00035", + "split": "eval", + "split_group_id": "task_00035__episode_000102", + "recording_id": null, + "episode_id": 102, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. both arms are idle" + } + ], + "images": [ + "images/task_00035_102_1653_camera_top.jpg", + "images/task_00035_102_1656_camera_top.jpg", + "images/task_00035_102_1659_camera_top.jpg", + "images/task_00035_102_1662_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00035", + "split": "eval", + "split_group_id": "task_00035__episode_000102", + "recording_id": null, + "episode_id": 102, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — contact established" + } + ], + "images": [ + "images/task_00035_102_2334_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00035", + "split": "eval", + "split_group_id": "task_00035__episode_000102", + "recording_id": null, + "episode_id": 102, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00035_102_1529_camera_top.jpg", + "images/task_00035_102_1532_camera_top.jpg", + "images/task_00035_102_1535_camera_top.jpg", + "images/task_00035_102_1538_camera_top.jpg", + "images/task_00035_102_1541_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00035", + "split": "eval", + "split_group_id": "task_00035__episode_000102", + "recording_id": null, + "episode_id": 102, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_00036_18_3033_camera_top.jpg", + "images/task_00036_18_2830_camera_top.jpg", + "images/task_00036_18_2959_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00036", + "split": "eval", + "split_group_id": "task_00036__episode_000018", + "recording_id": null, + "episode_id": 18, + "camera": "camera_top", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nB. toward the right side of the scene\nA. toward the top of the scene\nC. toward the left side of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. toward the right side of the scene" + } + ], + "images": [ + "images/task_00036_42_3664_camera_top.jpg", + "images/task_00036_42_3669_camera_top.jpg", + "images/task_00036_42_3674_camera_top.jpg", + "images/task_00036_42_3679_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00036", + "split": "eval", + "split_group_id": "task_00036__episode_000042", + "recording_id": null, + "episode_id": 42, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. hold and carry\nC. pre-approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
approach" + } + ], + "images": [ + "images/task_00036_76_2620_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00036", + "split": "eval", + "split_group_id": "task_00036__episode_000076", + "recording_id": null, + "episode_id": 76, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00036_107_2569_camera_top.jpg", + "images/task_00036_107_2572_camera_top.jpg", + "images/task_00036_107_2575_camera_top.jpg", + "images/task_00036_107_2578_camera_top.jpg", + "images/task_00036_107_2581_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00036", + "split": "eval", + "split_group_id": "task_00036__episode_000107", + "recording_id": null, + "episode_id": 107, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the right side of the scene\nA. toward the top of the scene\nC. toward the left side of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00037_3_1245_camera_top.jpg", + "images/task_00037_3_1250_camera_top.jpg", + "images/task_00037_3_1255_camera_top.jpg", + "images/task_00037_3_1260_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00037", + "split": "eval", + "split_group_id": "task_00037__episode_000003", + "recording_id": null, + "episode_id": 3, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. transfer\nC. approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. release" + } + ], + "images": [ + "images/task_00037_39_3824_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00037", + "split": "eval", + "split_group_id": "task_00037__episode_000039", + "recording_id": null, + "episode_id": 39, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00037_60_2750_camera_top.jpg", + "images/task_00037_60_2753_camera_top.jpg", + "images/task_00037_60_2756_camera_top.jpg", + "images/task_00037_60_2759_camera_top.jpg", + "images/task_00037_60_2762_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00037", + "split": "eval", + "split_group_id": "task_00037__episode_000060", + "recording_id": null, + "episode_id": 60, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nD. toward the bottom of the scene\nA. toward the top of the scene\nB. toward the right side of the scene\nC. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. toward the bottom of the scene" + } + ], + "images": [ + "images/task_00037_60_2391_camera_top.jpg", + "images/task_00037_60_2396_camera_top.jpg", + "images/task_00037_60_2401_camera_top.jpg", + "images/task_00037_60_2406_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00037", + "split": "eval", + "split_group_id": "task_00037__episode_000060", + "recording_id": null, + "episode_id": 60, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_00037_60_2344_camera_top.jpg", + "images/task_00037_60_377_camera_top.jpg", + "images/task_00037_60_3658_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00037", + "split": "eval", + "split_group_id": "task_00037__episode_000060", + "recording_id": null, + "episode_id": 60, + "camera": "camera_top", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00037_60_3478_camera_top.jpg", + "images/task_00037_60_3481_camera_top.jpg", + "images/task_00037_60_3484_camera_top.jpg", + "images/task_00037_60_3487_camera_top.jpg", + "images/task_00037_60_3490_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00037", + "split": "eval", + "split_group_id": "task_00037__episode_000060", + "recording_id": null, + "episode_id": 60, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_00037_60_505_camera_top.jpg", + "images/task_00037_60_3365_camera_top.jpg", + "images/task_00037_60_3251_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00037", + "split": "eval", + "split_group_id": "task_00037__episode_000060", + "recording_id": null, + "episode_id": 60, + "camera": "camera_top", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. approach\nC. pre-approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. contact" + } + ], + "images": [ + "images/task_00037_60_2587_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00037", + "split": "eval", + "split_group_id": "task_00037__episode_000060", + "recording_id": null, + "episode_id": 60, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00037_60_2367_camera_top_t-6.jpg", + "images/task_00037_60_2370_camera_top_t-3.jpg", + "images/task_00037_60_2373_camera_top_t0.jpg", + "images/task_00037_60_2376_camera_top_t+3.jpg", + "images/task_00037_60_2379_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00037", + "split": "eval", + "split_group_id": "task_00037__episode_000060", + "recording_id": null, + "episode_id": 60, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the left gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/task_00037_60_367_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00037", + "split": "eval", + "split_group_id": "task_00037__episode_000060", + "recording_id": null, + "episode_id": 60, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/task_00037_60_320_camera_top.jpg", + "images/task_00037_60_527_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00037", + "split": "eval", + "split_group_id": "task_00037__episode_000060", + "recording_id": null, + "episode_id": 60, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. pre-approach\nC. transfer\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. contact" + } + ], + "images": [ + "images/task_00037_60_2469_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00037", + "split": "eval", + "split_group_id": "task_00037__episode_000060", + "recording_id": null, + "episode_id": 60, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. pre-approach\nC. hold and carry\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. transfer" + } + ], + "images": [ + "images/task_00037_60_801_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00037", + "split": "eval", + "split_group_id": "task_00037__episode_000060", + "recording_id": null, + "episode_id": 60, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00037_60_1652_camera_top.jpg", + "images/task_00037_60_1655_camera_top.jpg", + "images/task_00037_60_1658_camera_top.jpg", + "images/task_00037_60_1661_camera_top.jpg", + "images/task_00037_60_1664_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00037", + "split": "eval", + "split_group_id": "task_00037__episode_000060", + "recording_id": null, + "episode_id": 60, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. pre-approach\nC. release\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. approach" + } + ], + "images": [ + "images/task_00037_72_228_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00037", + "split": "eval", + "split_group_id": "task_00037__episode_000072", + "recording_id": null, + "episode_id": 72, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the right side of the scene\nA. toward the left side of the scene\nC. toward the top of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00037_93_1391_camera_top.jpg", + "images/task_00037_93_1396_camera_top.jpg", + "images/task_00037_93_1401_camera_top.jpg", + "images/task_00037_93_1406_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00037", + "split": "eval", + "split_group_id": "task_00037__episode_000093", + "recording_id": null, + "episode_id": 93, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the left side of the scene\nA. toward the right side of the scene\nB. toward the top of the scene\nC. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. toward the left side of the scene" + } + ], + "images": [ + "images/task_00037_147_266_camera_top.jpg", + "images/task_00037_147_271_camera_top.jpg", + "images/task_00037_147_276_camera_top.jpg", + "images/task_00037_147_281_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00037", + "split": "eval", + "split_group_id": "task_00037__episode_000147", + "recording_id": null, + "episode_id": 147, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. release\nC. transfer\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
transfer" + } + ], + "images": [ + "images/task_00038_25_3577_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00038", + "split": "eval", + "split_group_id": "task_00038__episode_000025", + "recording_id": null, + "episode_id": 25, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nC. toward the left side of the scene\nA. toward the right side of the scene\nB. toward the top of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. toward the left side of the scene" + } + ], + "images": [ + "images/task_00038_36_1828_camera_top.jpg", + "images/task_00038_36_1833_camera_top.jpg", + "images/task_00038_36_1838_camera_top.jpg", + "images/task_00038_36_1843_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00038", + "split": "eval", + "split_group_id": "task_00038__episode_000036", + "recording_id": null, + "episode_id": 36, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the left side of the scene\nA. toward the bottom of the scene\nC. toward the top of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
toward the left side of the scene" + } + ], + "images": [ + "images/task_00038_56_2324_camera_top.jpg", + "images/task_00038_56_2329_camera_top.jpg", + "images/task_00038_56_2334_camera_top.jpg", + "images/task_00038_56_2339_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00038", + "split": "eval", + "split_group_id": "task_00038__episode_000056", + "recording_id": null, + "episode_id": 56, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nB. toward the right side of the scene\nA. toward the bottom of the scene\nC. toward the top of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. toward the right side of the scene" + } + ], + "images": [ + "images/task_00038_65_1135_camera_top.jpg", + "images/task_00038_65_1140_camera_top.jpg", + "images/task_00038_65_1145_camera_top.jpg", + "images/task_00038_65_1150_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00038", + "split": "eval", + "split_group_id": "task_00038__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nA. toward the right side of the scene\nB. toward the bottom of the scene\nC. toward the top of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00038_85_3189_camera_top.jpg", + "images/task_00038_85_3194_camera_top.jpg", + "images/task_00038_85_3199_camera_top.jpg", + "images/task_00038_85_3204_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00038", + "split": "eval", + "split_group_id": "task_00038__episode_000085", + "recording_id": null, + "episode_id": 85, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the left side of the scene\nB. toward the bottom of the scene\nC. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. toward the right side of the scene" + } + ], + "images": [ + "images/task_00038_90_2310_camera_top.jpg", + "images/task_00038_90_2315_camera_top.jpg", + "images/task_00038_90_2320_camera_top.jpg", + "images/task_00038_90_2325_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00038", + "split": "eval", + "split_group_id": "task_00038__episode_000090", + "recording_id": null, + "episode_id": 90, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/task_00038_116_2096_camera_top.jpg", + "images/task_00038_116_2168_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00038", + "split": "eval", + "split_group_id": "task_00038__episode_000116", + "recording_id": null, + "episode_id": 116, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. contact\nC. release\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. hold and carry" + } + ], + "images": [ + "images/task_00038_116_2489_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00038", + "split": "eval", + "split_group_id": "task_00038__episode_000116", + "recording_id": null, + "episode_id": 116, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nA. toward the left side of the scene\nB. toward the right side of the scene\nC. toward the top of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
toward the left side of the scene" + } + ], + "images": [ + "images/task_00038_116_3203_camera_top.jpg", + "images/task_00038_116_3208_camera_top.jpg", + "images/task_00038_116_3213_camera_top.jpg", + "images/task_00038_116_3218_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00038", + "split": "eval", + "split_group_id": "task_00038__episode_000116", + "recording_id": null, + "episode_id": 116, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00038_116_2665_camera_top.jpg", + "images/task_00038_116_2668_camera_top.jpg", + "images/task_00038_116_2671_camera_top.jpg", + "images/task_00038_116_2674_camera_top.jpg", + "images/task_00038_116_2677_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00038", + "split": "eval", + "split_group_id": "task_00038__episode_000116", + "recording_id": null, + "episode_id": 116, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. 
Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_00038_116_2060_camera_top.jpg", + "images/task_00038_116_2128_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00038", + "split": "eval", + "split_group_id": "task_00038__episode_000116", + "recording_id": null, + "episode_id": 116, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. pre-approach\nC. approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. approach" + } + ], + "images": [ + "images/task_00038_116_3472_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00038", + "split": "eval", + "split_group_id": "task_00038__episode_000116", + "recording_id": null, + "episode_id": 116, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/task_00038_116_2706_camera_top.jpg", + "images/task_00038_116_2500_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00038", + "split": "eval", + "split_group_id": "task_00038__episode_000116", + "recording_id": null, + "episode_id": 116, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00038_116_2569_camera_top.jpg", + "images/task_00038_116_2572_camera_top.jpg", + "images/task_00038_116_2575_camera_top.jpg", + "images/task_00038_116_2578_camera_top.jpg", + "images/task_00038_116_2581_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00038", + "split": "eval", + "split_group_id": "task_00038__episode_000116", + "recording_id": null, + "episode_id": 116, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. pre-approach\nC. release\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
pre-approach" + } + ], + "images": [ + "images/task_00038_116_1037_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00038", + "split": "eval", + "split_group_id": "task_00038__episode_000116", + "recording_id": null, + "episode_id": 116, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nC. toward the left side of the scene\nA. toward the top of the scene\nB. toward the bottom of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. toward the left side of the scene" + } + ], + "images": [ + "images/task_00038_133_1072_camera_top.jpg", + "images/task_00038_133_1077_camera_top.jpg", + "images/task_00038_133_1082_camera_top.jpg", + "images/task_00038_133_1087_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00038", + "split": "eval", + "split_group_id": "task_00038__episode_000133", + "recording_id": null, + "episode_id": 133, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. pre-approach\nC. transfer\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
approach" + } + ], + "images": [ + "images/task_00039_0_61_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00039", + "split": "eval", + "split_group_id": "task_00039__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00039_8_160_camera_top.jpg", + "images/task_00039_8_163_camera_top.jpg", + "images/task_00039_8_166_camera_top.jpg", + "images/task_00039_8_169_camera_top.jpg", + "images/task_00039_8_172_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00039", + "split": "eval", + "split_group_id": "task_00039__episode_000008", + "recording_id": null, + "episode_id": 8, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00039_74_124_camera_top.jpg", + "images/task_00039_74_127_camera_top.jpg", + "images/task_00039_74_130_camera_top.jpg", + "images/task_00039_74_133_camera_top.jpg", + "images/task_00039_74_136_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00039", + "split": "eval", + "split_group_id": "task_00039__episode_000074", + "recording_id": null, + "episode_id": 74, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_00039_74_269_camera_top.jpg", + "images/task_00039_74_482_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00039", + "split": "eval", + "split_group_id": "task_00039__episode_000074", + "recording_id": null, + "episode_id": 74, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_00039_74_108_camera_top.jpg", + "images/task_00039_74_658_camera_top.jpg", + "images/task_00039_74_271_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00039", + "split": "eval", + "split_group_id": "task_00039__episode_000074", + "recording_id": null, + "episode_id": 74, + "camera": "camera_top", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_00039_74_90_camera_top.jpg", + "images/task_00039_74_303_camera_top.jpg", + "images/task_00039_74_593_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00039", + "split": "eval", + "split_group_id": "task_00039__episode_000074", + "recording_id": null, + "episode_id": 74, + "camera": "camera_top", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/task_00039_74_298_camera_top.jpg", + "images/task_00039_74_534_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00039", + "split": "eval", + "split_group_id": "task_00039__episode_000074", + "recording_id": null, + "episode_id": 74, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the left side of the scene\nA. toward the bottom of the scene\nC. toward the top of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. toward the left side of the scene" + } + ], + "images": [ + "images/task_00039_74_208_camera_top.jpg", + "images/task_00039_74_213_camera_top.jpg", + "images/task_00039_74_218_camera_top.jpg", + "images/task_00039_74_223_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00039", + "split": "eval", + "split_group_id": "task_00039__episode_000074", + "recording_id": null, + "episode_id": 74, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/task_00039_74_176_camera_top.jpg", + "images/task_00039_74_106_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00039", + "split": "eval", + "split_group_id": "task_00039__episode_000074", + "recording_id": null, + "episode_id": 74, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nA. toward the left side of the scene\nB. toward the top of the scene\nC. toward the right side of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. toward the left side of the scene" + } + ], + "images": [ + "images/task_00039_74_144_camera_top.jpg", + "images/task_00039_74_149_camera_top.jpg", + "images/task_00039_74_154_camera_top.jpg", + "images/task_00039_74_159_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00039", + "split": "eval", + "split_group_id": "task_00039__episode_000074", + "recording_id": null, + "episode_id": 74, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the right side of the scene\nA. toward the top of the scene\nC. toward the left side of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00039_105_138_camera_top.jpg", + "images/task_00039_105_143_camera_top.jpg", + "images/task_00039_105_148_camera_top.jpg", + "images/task_00039_105_153_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00039", + "split": "eval", + "split_group_id": "task_00039__episode_000105", + "recording_id": null, + "episode_id": 105, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00039_114_81_camera_top.jpg", + "images/task_00039_114_84_camera_top.jpg", + "images/task_00039_114_87_camera_top.jpg", + "images/task_00039_114_90_camera_top.jpg", + "images/task_00039_114_93_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00039", + "split": "eval", + "split_group_id": "task_00039__episode_000114", + "recording_id": null, + "episode_id": 114, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00039_136_90_camera_top.jpg", + "images/task_00039_136_93_camera_top.jpg", + "images/task_00039_136_96_camera_top.jpg", + "images/task_00039_136_99_camera_top.jpg", + "images/task_00039_136_102_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00039", + "split": "eval", + "split_group_id": "task_00039__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. release\nC. pre-approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. release" + } + ], + "images": [ + "images/task_00039_149_336_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00039", + "split": "eval", + "split_group_id": "task_00039__episode_000149", + "recording_id": null, + "episode_id": 149, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the right side of the scene\nA. toward the top of the scene\nC. toward the bottom of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00039_202_453_camera_top.jpg", + "images/task_00039_202_458_camera_top.jpg", + "images/task_00039_202_463_camera_top.jpg", + "images/task_00039_202_468_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00039", + "split": "eval", + "split_group_id": "task_00039__episode_000202", + "recording_id": null, + "episode_id": 202, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the left side of the scene\nA. toward the top of the scene\nB. toward the right side of the scene\nC. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. toward the left side of the scene" + } + ], + "images": [ + "images/task_00040_28_444_camera_top.jpg", + "images/task_00040_28_449_camera_top.jpg", + "images/task_00040_28_454_camera_top.jpg", + "images/task_00040_28_459_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00040", + "split": "eval", + "split_group_id": "task_00040__episode_000028", + "recording_id": null, + "episode_id": 28, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nA. toward the right side of the scene\nB. toward the top of the scene\nC. toward the bottom of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00040_89_722_camera_top.jpg", + "images/task_00040_89_727_camera_top.jpg", + "images/task_00040_89_732_camera_top.jpg", + "images/task_00040_89_737_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00040", + "split": "eval", + "split_group_id": "task_00040__episode_000089", + "recording_id": null, + "episode_id": 89, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. release\nC. pre-approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. pre-approach" + } + ], + "images": [ + "images/task_00040_100_828_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00040", + "split": "eval", + "split_group_id": "task_00040__episode_000100", + "recording_id": null, + "episode_id": 100, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_00041_0_1907_camera_top.jpg", + "images/task_00041_0_4221_camera_top.jpg", + "images/task_00041_0_3664_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00041", + "split": "eval", + "split_group_id": "task_00041__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. 
The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_00041_0_3716_camera_top.jpg", + "images/task_00041_0_3892_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00041", + "split": "eval", + "split_group_id": "task_00041__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_00041_0_4288_camera_top.jpg", + "images/task_00041_0_1928_camera_top.jpg", + "images/task_00041_0_3786_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00041", + "split": "eval", + "split_group_id": "task_00041__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. pre-approach\nC. contact\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
pre-approach" + } + ], + "images": [ + "images/task_00041_0_444_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00041", + "split": "eval", + "split_group_id": "task_00041__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00041_0_1833_camera_top.jpg", + "images/task_00041_0_1836_camera_top.jpg", + "images/task_00041_0_1839_camera_top.jpg", + "images/task_00041_0_1842_camera_top.jpg", + "images/task_00041_0_1845_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00041", + "split": "eval", + "split_group_id": "task_00041__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00041_0_833_camera_top.jpg", + "images/task_00041_0_836_camera_top.jpg", + "images/task_00041_0_839_camera_top.jpg", + "images/task_00041_0_842_camera_top.jpg", + "images/task_00041_0_845_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00041", + "split": "eval", + "split_group_id": "task_00041__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00041_0_3215_camera_top_t-6.jpg", + "images/task_00041_0_3218_camera_top_t-3.jpg", + "images/task_00041_0_3221_camera_top_t0.jpg", + "images/task_00041_0_3224_camera_top_t+3.jpg", + "images/task_00041_0_3227_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00041", + "split": "eval", + "split_group_id": "task_00041__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_00041_0_2594_camera_top.jpg", + "images/task_00041_0_1908_camera_top.jpg", + "images/task_00041_0_2294_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00041", + "split": "eval", + "split_group_id": "task_00041__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. hold and carry\nC. contact\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. release" + } + ], + "images": [ + "images/task_00041_36_2112_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00041", + "split": "eval", + "split_group_id": "task_00041__episode_000036", + "recording_id": null, + "episode_id": 36, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_00041_36_1835_camera_top.jpg", + "images/task_00041_36_822_camera_top.jpg", + "images/task_00041_36_1959_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00041", + "split": "eval", + "split_group_id": "task_00041__episode_000036", + "recording_id": null, + "episode_id": 36, + "camera": "camera_top", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nD. toward the top of the scene\nA. toward the bottom of the scene\nB. 
toward the left side of the scene\nC. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. toward the top of the scene" + } + ], + "images": [ + "images/task_00041_36_765_camera_top.jpg", + "images/task_00041_36_770_camera_top.jpg", + "images/task_00041_36_775_camera_top.jpg", + "images/task_00041_36_780_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00041", + "split": "eval", + "split_group_id": "task_00041__episode_000036", + "recording_id": null, + "episode_id": 36, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. approach\nC. release\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. hold and carry" + } + ], + "images": [ + "images/task_00041_36_1128_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00041", + "split": "eval", + "split_group_id": "task_00041__episode_000036", + "recording_id": null, + "episode_id": 36, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00041_36_1785_camera_top.jpg", + "images/task_00041_36_1788_camera_top.jpg", + "images/task_00041_36_1791_camera_top.jpg", + "images/task_00041_36_1794_camera_top.jpg", + "images/task_00041_36_1797_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00041", + "split": "eval", + "split_group_id": "task_00041__episode_000036", + "recording_id": null, + "episode_id": 36, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nB. toward the right side of the scene\nA. toward the bottom of the scene\nC. toward the top of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. toward the right side of the scene" + } + ], + "images": [ + "images/task_00041_36_1148_camera_top.jpg", + "images/task_00041_36_1153_camera_top.jpg", + "images/task_00041_36_1158_camera_top.jpg", + "images/task_00041_36_1163_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00041", + "split": "eval", + "split_group_id": "task_00041__episode_000036", + "recording_id": null, + "episode_id": 36, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/task_00041_36_1973_camera_top.jpg", + "images/task_00041_36_1751_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00041", + "split": "eval", + "split_group_id": "task_00041__episode_000036", + "recording_id": null, + "episode_id": 36, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00041_36_1093_camera_top.jpg", + "images/task_00041_36_1096_camera_top.jpg", + "images/task_00041_36_1099_camera_top.jpg", + "images/task_00041_36_1102_camera_top.jpg", + "images/task_00041_36_1105_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00041", + "split": "eval", + "split_group_id": "task_00041__episode_000036", + "recording_id": null, + "episode_id": 36, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nB. toward the bottom of the scene\nA. toward the right side of the scene\nC. toward the left side of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
toward the bottom of the scene" + } + ], + "images": [ + "images/task_00041_80_1494_camera_top.jpg", + "images/task_00041_80_1499_camera_top.jpg", + "images/task_00041_80_1504_camera_top.jpg", + "images/task_00041_80_1509_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00041", + "split": "eval", + "split_group_id": "task_00041__episode_000080", + "recording_id": null, + "episode_id": 80, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nA. toward the bottom of the scene\nB. toward the right side of the scene\nC. toward the top of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. toward the bottom of the scene" + } + ], + "images": [ + "images/task_00041_120_205_camera_top.jpg", + "images/task_00041_120_210_camera_top.jpg", + "images/task_00041_120_215_camera_top.jpg", + "images/task_00041_120_220_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00041", + "split": "eval", + "split_group_id": "task_00041__episode_000120", + "recording_id": null, + "episode_id": 120, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nC. toward the left side of the scene\nA. toward the right side of the scene\nB. toward the top of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
toward the left side of the scene" + } + ], + "images": [ + "images/task_00042_12_988_camera_top.jpg", + "images/task_00042_12_993_camera_top.jpg", + "images/task_00042_12_998_camera_top.jpg", + "images/task_00042_12_1003_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00042", + "split": "eval", + "split_group_id": "task_00042__episode_000012", + "recording_id": null, + "episode_id": 12, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nB. toward the right side of the scene\nA. toward the top of the scene\nC. toward the bottom of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. toward the right side of the scene" + } + ], + "images": [ + "images/task_00042_41_569_camera_top.jpg", + "images/task_00042_41_574_camera_top.jpg", + "images/task_00042_41_579_camera_top.jpg", + "images/task_00042_41_584_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00042", + "split": "eval", + "split_group_id": "task_00042__episode_000041", + "recording_id": null, + "episode_id": 41, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00042_62_461_camera_top.jpg", + "images/task_00042_62_464_camera_top.jpg", + "images/task_00042_62_467_camera_top.jpg", + "images/task_00042_62_470_camera_top.jpg", + "images/task_00042_62_473_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00042", + "split": "eval", + "split_group_id": "task_00042__episode_000062", + "recording_id": null, + "episode_id": 62, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nA. toward the right side of the scene\nB. toward the bottom of the scene\nC. toward the left side of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. toward the right side of the scene" + } + ], + "images": [ + "images/task_00042_62_512_camera_top.jpg", + "images/task_00042_62_517_camera_top.jpg", + "images/task_00042_62_522_camera_top.jpg", + "images/task_00042_62_527_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00042", + "split": "eval", + "split_group_id": "task_00042__episode_000062", + "recording_id": null, + "episode_id": 62, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. transfer\nC. release\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
contact" + } + ], + "images": [ + "images/task_00042_62_271_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00042", + "split": "eval", + "split_group_id": "task_00042__episode_000062", + "recording_id": null, + "episode_id": 62, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00042_62_876_camera_top_t-6.jpg", + "images/task_00042_62_882_camera_top_t-3.jpg", + "images/task_00042_62_882_camera_top_t0.jpg", + "images/task_00042_62_885_camera_top_t+3.jpg", + "images/task_00042_62_888_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00042", + "split": "eval", + "split_group_id": "task_00042__episode_000062", + "recording_id": null, + "episode_id": 62, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_00042_62_349_camera_top.jpg", + "images/task_00042_62_988_camera_top.jpg", + "images/task_00042_62_274_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00042", + "split": "eval", + "split_group_id": "task_00042__episode_000062", + "recording_id": null, + "episode_id": 62, + "camera": "camera_top", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_00042_62_1033_camera_top.jpg", + "images/task_00042_62_265_camera_top.jpg", + "images/task_00042_62_438_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00042", + "split": "eval", + "split_group_id": "task_00042__episode_000062", + "recording_id": null, + "episode_id": 62, + "camera": "camera_top", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nD. toward the top of the scene\nA. toward the bottom of the scene\nB. toward the right side of the scene\nC. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
toward the top of the scene" + } + ], + "images": [ + "images/task_00042_71_800_camera_top.jpg", + "images/task_00042_71_805_camera_top.jpg", + "images/task_00042_71_810_camera_top.jpg", + "images/task_00042_71_815_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00042", + "split": "eval", + "split_group_id": "task_00042__episode_000071", + "recording_id": null, + "episode_id": 71, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_00042_71_132_camera_top.jpg", + "images/task_00042_71_915_camera_top.jpg", + "images/task_00042_71_318_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00042", + "split": "eval", + "split_group_id": "task_00042__episode_000071", + "recording_id": null, + "episode_id": 71, + "camera": "camera_top", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00042_71_280_camera_top.jpg", + "images/task_00042_71_283_camera_top.jpg", + "images/task_00042_71_286_camera_top.jpg", + "images/task_00042_71_289_camera_top.jpg", + "images/task_00042_71_292_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00042", + "split": "eval", + "split_group_id": "task_00042__episode_000071", + "recording_id": null, + "episode_id": 71, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00042_71_571_camera_top_t-6.jpg", + "images/task_00042_71_574_camera_top_t-3.jpg", + "images/task_00042_71_577_camera_top_t0.jpg", + "images/task_00042_71_580_camera_top_t+3.jpg", + "images/task_00042_71_583_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00042", + "split": "eval", + "split_group_id": "task_00042__episode_000071", + "recording_id": null, + "episode_id": 71, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_00042_71_240_camera_top.jpg", + "images/task_00042_71_157_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00042", + "split": "eval", + "split_group_id": "task_00042__episode_000071", + "recording_id": null, + "episode_id": 71, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_00042_71_315_camera_top.jpg", + "images/task_00042_71_125_camera_top.jpg", + "images/task_00042_71_478_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00042", + "split": "eval", + "split_group_id": "task_00042__episode_000071", + "recording_id": null, + "episode_id": 71, + "camera": "camera_top", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/task_00042_71_155_camera_top.jpg", + "images/task_00042_71_313_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00042", + "split": "eval", + "split_group_id": "task_00042__episode_000071", + "recording_id": null, + "episode_id": 71, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. release\nC. transfer\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. transfer" + } + ], + "images": [ + "images/task_00042_71_313_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00042", + "split": "eval", + "split_group_id": "task_00042__episode_000071", + "recording_id": null, + "episode_id": 71, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the bottom of the scene\nB. toward the top of the scene\nC. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00042_79_188_camera_top.jpg", + "images/task_00042_79_193_camera_top.jpg", + "images/task_00042_79_198_camera_top.jpg", + "images/task_00042_79_203_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00042", + "split": "eval", + "split_group_id": "task_00042__episode_000079", + "recording_id": null, + "episode_id": 79, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_00042_87_343_camera_top.jpg", + "images/task_00042_87_471_camera_top.jpg", + "images/task_00042_87_293_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00042", + "split": "eval", + "split_group_id": "task_00042__episode_000087", + "recording_id": null, + "episode_id": 87, + "camera": "camera_top", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00042_87_1103_camera_top_t-6.jpg", + "images/task_00042_87_1106_camera_top_t-3.jpg", + "images/task_00042_87_1109_camera_top_t0.jpg", + "images/task_00042_87_1112_camera_top_t+3.jpg", + "images/task_00042_87_1115_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00042", + "split": "eval", + "split_group_id": "task_00042__episode_000087", + "recording_id": null, + "episode_id": 87, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00042_87_342_camera_top.jpg", + "images/task_00042_87_345_camera_top.jpg", + "images/task_00042_87_348_camera_top.jpg", + "images/task_00042_87_351_camera_top.jpg", + "images/task_00042_87_354_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00042", + "split": "eval", + "split_group_id": "task_00042__episode_000087", + "recording_id": null, + "episode_id": 87, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00042_87_396_camera_top.jpg", + "images/task_00042_87_399_camera_top.jpg", + "images/task_00042_87_402_camera_top.jpg", + "images/task_00042_87_405_camera_top.jpg", + "images/task_00042_87_408_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00042", + "split": "eval", + "split_group_id": "task_00042__episode_000087", + "recording_id": null, + "episode_id": 87, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00042_87_439_camera_top.jpg", + "images/task_00042_87_442_camera_top.jpg", + "images/task_00042_87_445_camera_top.jpg", + "images/task_00042_87_448_camera_top.jpg", + "images/task_00042_87_451_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00042", + "split": "eval", + "split_group_id": "task_00042__episode_000087", + "recording_id": null, + "episode_id": 87, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_00042_87_310_camera_top.jpg", + "images/task_00042_87_278_camera_top.jpg", + "images/task_00042_87_705_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00042", + "split": "eval", + "split_group_id": "task_00042__episode_000087", + "recording_id": null, + "episode_id": 87, + "camera": "camera_top", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. contact\nC. pre-approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. pre-approach" + } + ], + "images": [ + "images/task_00042_92_264_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00042", + "split": "eval", + "split_group_id": "task_00042__episode_000092", + "recording_id": null, + "episode_id": 92, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_00042_96_345_camera_top.jpg", + "images/task_00042_96_506_camera_top.jpg", + "images/task_00042_96_396_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00042", + "split": "eval", + "split_group_id": "task_00042__episode_000096", + "recording_id": null, + "episode_id": 96, + "camera": "camera_top", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. 
toward the bottom of the scene\nB. toward the left side of the scene\nC. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. toward the right side of the scene" + } + ], + "images": [ + "images/task_00042_125_504_camera_top.jpg", + "images/task_00042_125_509_camera_top.jpg", + "images/task_00042_125_514_camera_top.jpg", + "images/task_00042_125_519_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00042", + "split": "eval", + "split_group_id": "task_00042__episode_000125", + "recording_id": null, + "episode_id": 125, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the top of the scene\nB. toward the bottom of the scene\nC. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. toward the right side of the scene" + } + ], + "images": [ + "images/task_00043_52_172_camera_top.jpg", + "images/task_00043_52_177_camera_top.jpg", + "images/task_00043_52_182_camera_top.jpg", + "images/task_00043_52_187_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00043", + "split": "eval", + "split_group_id": "task_00043__episode_000052", + "recording_id": null, + "episode_id": 52, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the left side of the scene\nB. toward the top of the scene\nC. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00043_71_184_camera_top.jpg", + "images/task_00043_71_189_camera_top.jpg", + "images/task_00043_71_194_camera_top.jpg", + "images/task_00043_71_199_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00043", + "split": "eval", + "split_group_id": "task_00043__episode_000071", + "recording_id": null, + "episode_id": 71, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_00043_105_1045_camera_top.jpg", + "images/task_00043_105_157_camera_top.jpg", + "images/task_00043_105_894_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00043", + "split": "eval", + "split_group_id": "task_00043__episode_000105", + "recording_id": null, + "episode_id": 105, + "camera": "camera_top", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. release\nC. contact\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. hold and carry" + } + ], + "images": [ + "images/task_00043_121_209_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00043", + "split": "eval", + "split_group_id": "task_00043__episode_000121", + "recording_id": null, + "episode_id": 121, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nA. 
toward the left side of the scene\nB. toward the right side of the scene\nC. toward the bottom of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. toward the left side of the scene" + } + ], + "images": [ + "images/task_00044_14_157_camera_top.jpg", + "images/task_00044_14_162_camera_top.jpg", + "images/task_00044_14_167_camera_top.jpg", + "images/task_00044_14_172_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00044", + "split": "eval", + "split_group_id": "task_00044__episode_000014", + "recording_id": null, + "episode_id": 14, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. transfer\nC. hold and carry\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. release" + } + ], + "images": [ + "images/task_00044_19_1680_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00044", + "split": "eval", + "split_group_id": "task_00044__episode_000019", + "recording_id": null, + "episode_id": 19, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. transfer\nC. release\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
transfer" + } + ], + "images": [ + "images/task_00044_36_2419_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00044", + "split": "eval", + "split_group_id": "task_00044__episode_000036", + "recording_id": null, + "episode_id": 36, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. transfer\nC. contact\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. contact" + } + ], + "images": [ + "images/task_00044_121_395_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00044", + "split": "eval", + "split_group_id": "task_00044__episode_000121", + "recording_id": null, + "episode_id": 121, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. transfer\nC. approach\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. release" + } + ], + "images": [ + "images/task_00045_19_3264_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00045", + "split": "eval", + "split_group_id": "task_00045__episode_000019", + "recording_id": null, + "episode_id": 19, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00045_27_720_camera_top.jpg", + "images/task_00045_27_723_camera_top.jpg", + "images/task_00045_27_726_camera_top.jpg", + "images/task_00045_27_729_camera_top.jpg", + "images/task_00045_27_732_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00045", + "split": "eval", + "split_group_id": "task_00045__episode_000027", + "recording_id": null, + "episode_id": 27, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the left side of the scene\nA. toward the right side of the scene\nC. toward the bottom of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. toward the left side of the scene" + } + ], + "images": [ + "images/task_00045_67_2048_camera_top.jpg", + "images/task_00045_67_2053_camera_top.jpg", + "images/task_00045_67_2058_camera_top.jpg", + "images/task_00045_67_2063_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00045", + "split": "eval", + "split_group_id": "task_00045__episode_000067", + "recording_id": null, + "episode_id": 67, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the left side of the scene\nA. toward the bottom of the scene\nC. toward the top of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
toward the left side of the scene" + } + ], + "images": [ + "images/task_00046_2_490_camera_top.jpg", + "images/task_00046_2_495_camera_top.jpg", + "images/task_00046_2_500_camera_top.jpg", + "images/task_00046_2_505_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00046", + "split": "eval", + "split_group_id": "task_00046__episode_000002", + "recording_id": null, + "episode_id": 2, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_00046_2_171_camera_top.jpg", + "images/task_00046_2_474_camera_top.jpg", + "images/task_00046_2_295_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00046", + "split": "eval", + "split_group_id": "task_00046__episode_000002", + "recording_id": null, + "episode_id": 2, + "camera": "camera_top", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00046_2_242_camera_top.jpg", + "images/task_00046_2_245_camera_top.jpg", + "images/task_00046_2_248_camera_top.jpg", + "images/task_00046_2_251_camera_top.jpg", + "images/task_00046_2_254_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00046", + "split": "eval", + "split_group_id": "task_00046__episode_000002", + "recording_id": null, + "episode_id": 2, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00046_2_208_camera_top.jpg", + "images/task_00046_2_211_camera_top.jpg", + "images/task_00046_2_214_camera_top.jpg", + "images/task_00046_2_217_camera_top.jpg", + "images/task_00046_2_220_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00046", + "split": "eval", + "split_group_id": "task_00046__episode_000002", + "recording_id": null, + "episode_id": 2, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the right side of the scene\nA. toward the bottom of the scene\nC. toward the top of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00046_2_774_camera_top.jpg", + "images/task_00046_2_779_camera_top.jpg", + "images/task_00046_2_784_camera_top.jpg", + "images/task_00046_2_789_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00046", + "split": "eval", + "split_group_id": "task_00046__episode_000002", + "recording_id": null, + "episode_id": 2, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_00046_2_365_camera_top.jpg", + "images/task_00046_2_176_camera_top.jpg", + "images/task_00046_2_720_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00046", + "split": "eval", + "split_group_id": "task_00046__episode_000002", + "recording_id": null, + "episode_id": 2, + "camera": "camera_top", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. hold and carry\nC. approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. transfer" + } + ], + "images": [ + "images/task_00046_2_717_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00046", + "split": "eval", + "split_group_id": "task_00046__episode_000002", + "recording_id": null, + "episode_id": 2, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. approach\nC. transfer\nD. 
contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. transfer" + } + ], + "images": [ + "images/task_00046_5_535_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00046", + "split": "eval", + "split_group_id": "task_00046__episode_000005", + "recording_id": null, + "episode_id": 5, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the right side of the scene\nA. toward the left side of the scene\nC. toward the bottom of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. toward the right side of the scene" + } + ], + "images": [ + "images/task_00046_31_962_camera_top.jpg", + "images/task_00046_31_967_camera_top.jpg", + "images/task_00046_31_972_camera_top.jpg", + "images/task_00046_31_977_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00046", + "split": "eval", + "split_group_id": "task_00046__episode_000031", + "recording_id": null, + "episode_id": 31, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00046_34_774_camera_top.jpg", + "images/task_00046_34_777_camera_top.jpg", + "images/task_00046_34_780_camera_top.jpg", + "images/task_00046_34_783_camera_top.jpg", + "images/task_00046_34_786_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00046", + "split": "eval", + "split_group_id": "task_00046__episode_000034", + "recording_id": null, + "episode_id": 34, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nA. toward the right side of the scene\nB. toward the bottom of the scene\nC. toward the top of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. toward the right side of the scene" + } + ], + "images": [ + "images/task_00046_52_949_camera_top.jpg", + "images/task_00046_52_954_camera_top.jpg", + "images/task_00046_52_959_camera_top.jpg", + "images/task_00046_52_964_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00046", + "split": "eval", + "split_group_id": "task_00046__episode_000052", + "recording_id": null, + "episode_id": 52, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00046_71_614_camera_top.jpg", + "images/task_00046_71_617_camera_top.jpg", + "images/task_00046_71_620_camera_top.jpg", + "images/task_00046_71_623_camera_top.jpg", + "images/task_00046_71_626_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00046", + "split": "eval", + "split_group_id": "task_00046__episode_000071", + "recording_id": null, + "episode_id": 71, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nA. toward the left side of the scene\nB. toward the bottom of the scene\nC. toward the top of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. toward the left side of the scene" + } + ], + "images": [ + "images/task_00046_86_270_camera_top.jpg", + "images/task_00046_86_275_camera_top.jpg", + "images/task_00046_86_280_camera_top.jpg", + "images/task_00046_86_285_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00046", + "split": "eval", + "split_group_id": "task_00046__episode_000086", + "recording_id": null, + "episode_id": 86, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. approach\nC. pre-approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
transfer" + } + ], + "images": [ + "images/task_00046_97_389_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00046", + "split": "eval", + "split_group_id": "task_00046__episode_000097", + "recording_id": null, + "episode_id": 97, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00046_100_630_camera_top.jpg", + "images/task_00046_100_633_camera_top.jpg", + "images/task_00046_100_636_camera_top.jpg", + "images/task_00046_100_639_camera_top.jpg", + "images/task_00046_100_642_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00046", + "split": "eval", + "split_group_id": "task_00046__episode_000100", + "recording_id": null, + "episode_id": 100, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00046_102_257_camera_top.jpg", + "images/task_00046_102_260_camera_top.jpg", + "images/task_00046_102_263_camera_top.jpg", + "images/task_00046_102_266_camera_top.jpg", + "images/task_00046_102_269_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00046", + "split": "eval", + "split_group_id": "task_00046__episode_000102", + "recording_id": null, + "episode_id": 102, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. pre-approach\nC. approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. approach" + } + ], + "images": [ + "images/task_00046_118_397_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00046", + "split": "eval", + "split_group_id": "task_00046__episode_000118", + "recording_id": null, + "episode_id": 118, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00046_118_213_camera_top.jpg", + "images/task_00046_118_216_camera_top.jpg", + "images/task_00046_118_219_camera_top.jpg", + "images/task_00046_118_222_camera_top.jpg", + "images/task_00046_118_225_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00046", + "split": "eval", + "split_group_id": "task_00046__episode_000118", + "recording_id": null, + "episode_id": 118, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the right side of the scene\nA. toward the top of the scene\nC. toward the left side of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. toward the right side of the scene" + } + ], + "images": [ + "images/task_00046_118_264_camera_top.jpg", + "images/task_00046_118_269_camera_top.jpg", + "images/task_00046_118_274_camera_top.jpg", + "images/task_00046_118_279_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00046", + "split": "eval", + "split_group_id": "task_00046__episode_000118", + "recording_id": null, + "episode_id": 118, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the left side of the scene\nB. toward the bottom of the scene\nC. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00046_118_263_camera_top.jpg", + "images/task_00046_118_268_camera_top.jpg", + "images/task_00046_118_273_camera_top.jpg", + "images/task_00046_118_278_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00046", + "split": "eval", + "split_group_id": "task_00046__episode_000118", + "recording_id": null, + "episode_id": 118, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00046_118_583_camera_top.jpg", + "images/task_00046_118_586_camera_top.jpg", + "images/task_00046_118_589_camera_top.jpg", + "images/task_00046_118_592_camera_top.jpg", + "images/task_00046_118_595_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00046", + "split": "eval", + "split_group_id": "task_00046__episode_000118", + "recording_id": null, + "episode_id": 118, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00046_118_737_camera_top_t-6.jpg", + "images/task_00046_118_740_camera_top_t-3.jpg", + "images/task_00046_118_743_camera_top_t0.jpg", + "images/task_00046_118_746_camera_top_t+3.jpg", + "images/task_00046_118_749_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00046", + "split": "eval", + "split_group_id": "task_00046__episode_000118", + "recording_id": null, + "episode_id": 118, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_00046_118_410_camera_top.jpg", + "images/task_00046_118_774_camera_top.jpg", + "images/task_00046_118_696_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00046", + "split": "eval", + "split_group_id": "task_00046__episode_000118", + "recording_id": null, + "episode_id": 118, + "camera": "camera_top", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. approach\nC. release\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
release" + } + ], + "images": [ + "images/task_00047_47_2638_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00047", + "split": "eval", + "split_group_id": "task_00047__episode_000047", + "recording_id": null, + "episode_id": 47, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. pre-approach\nC. contact\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. release" + } + ], + "images": [ + "images/task_00047_47_2671_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00047", + "split": "eval", + "split_group_id": "task_00047__episode_000047", + "recording_id": null, + "episode_id": 47, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the left side of the scene\nB. toward the bottom of the scene\nC. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00047_47_1415_camera_top.jpg", + "images/task_00047_47_1420_camera_top.jpg", + "images/task_00047_47_1425_camera_top.jpg", + "images/task_00047_47_1430_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00047", + "split": "eval", + "split_group_id": "task_00047__episode_000047", + "recording_id": null, + "episode_id": 47, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00047_47_1180_camera_top_t-6.jpg", + "images/task_00047_47_1183_camera_top_t-3.jpg", + "images/task_00047_47_1186_camera_top_t0.jpg", + "images/task_00047_47_1189_camera_top_t+3.jpg", + "images/task_00047_47_1192_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00047", + "split": "eval", + "split_group_id": "task_00047__episode_000047", + "recording_id": null, + "episode_id": 47, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nC. toward the left side of the scene\nA. toward the top of the scene\nB. toward the bottom of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
toward the left side of the scene" + } + ], + "images": [ + "images/task_00047_47_1186_camera_top.jpg", + "images/task_00047_47_1191_camera_top.jpg", + "images/task_00047_47_1196_camera_top.jpg", + "images/task_00047_47_1201_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00047", + "split": "eval", + "split_group_id": "task_00047__episode_000047", + "recording_id": null, + "episode_id": 47, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00047_47_1990_camera_top.jpg", + "images/task_00047_47_1993_camera_top.jpg", + "images/task_00047_47_1996_camera_top.jpg", + "images/task_00047_47_1999_camera_top.jpg", + "images/task_00047_47_2002_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00047", + "split": "eval", + "split_group_id": "task_00047__episode_000047", + "recording_id": null, + "episode_id": 47, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_00047_47_1288_camera_top.jpg", + "images/task_00047_47_1123_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00047", + "split": "eval", + "split_group_id": "task_00047__episode_000047", + "recording_id": null, + "episode_id": 47, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_00047_47_1917_camera_top.jpg", + "images/task_00047_47_1467_camera_top.jpg", + "images/task_00047_47_934_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00047", + "split": "eval", + "split_group_id": "task_00047__episode_000047", + "recording_id": null, + "episode_id": 47, + "camera": "camera_top", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. approach\nC. transfer\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. approach" + } + ], + "images": [ + "images/task_00047_47_2107_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00047", + "split": "eval", + "split_group_id": "task_00047__episode_000047", + "recording_id": null, + "episode_id": 47, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. 
Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00047_47_2534_camera_top_t-6.jpg", + "images/task_00047_47_2537_camera_top_t-3.jpg", + "images/task_00047_47_2540_camera_top_t0.jpg", + "images/task_00047_47_2543_camera_top_t+3.jpg", + "images/task_00047_47_2546_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00047", + "split": "eval", + "split_group_id": "task_00047__episode_000047", + "recording_id": null, + "episode_id": 47, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nA. toward the left side of the scene\nB. toward the top of the scene\nC. toward the right side of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. toward the left side of the scene" + } + ], + "images": [ + "images/task_00047_79_1754_camera_top.jpg", + "images/task_00047_79_1759_camera_top.jpg", + "images/task_00047_79_1764_camera_top.jpg", + "images/task_00047_79_1769_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00047", + "split": "eval", + "split_group_id": "task_00047__episode_000079", + "recording_id": null, + "episode_id": 79, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. hold and carry\nC. contact\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
contact" + } + ], + "images": [ + "images/task_00047_109_665_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00047", + "split": "eval", + "split_group_id": "task_00047__episode_000109", + "recording_id": null, + "episode_id": 109, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00047_127_2404_camera_top.jpg", + "images/task_00047_127_2407_camera_top.jpg", + "images/task_00047_127_2410_camera_top.jpg", + "images/task_00047_127_2413_camera_top.jpg", + "images/task_00047_127_2416_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00047", + "split": "eval", + "split_group_id": "task_00047__episode_000127", + "recording_id": null, + "episode_id": 127, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_00047_133_1197_camera_top.jpg", + "images/task_00047_133_1014_camera_top.jpg", + "images/task_00047_133_1511_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00047", + "split": "eval", + "split_group_id": "task_00047__episode_000133", + "recording_id": null, + "episode_id": 133, + "camera": "camera_top", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00048_15_887_camera_top.jpg", + "images/task_00048_15_890_camera_top.jpg", + "images/task_00048_15_893_camera_top.jpg", + "images/task_00048_15_896_camera_top.jpg", + "images/task_00048_15_899_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00048", + "split": "eval", + "split_group_id": "task_00048__episode_000015", + "recording_id": null, + "episode_id": 15, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nB. toward the left side of the scene\nA. toward the right side of the scene\nC. toward the top of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
toward the left side of the scene" + } + ], + "images": [ + "images/task_00048_23_1163_camera_top.jpg", + "images/task_00048_23_1168_camera_top.jpg", + "images/task_00048_23_1173_camera_top.jpg", + "images/task_00048_23_1178_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00048", + "split": "eval", + "split_group_id": "task_00048__episode_000023", + "recording_id": null, + "episode_id": 23, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00048_84_569_camera_top.jpg", + "images/task_00048_84_572_camera_top.jpg", + "images/task_00048_84_575_camera_top.jpg", + "images/task_00048_84_578_camera_top.jpg", + "images/task_00048_84_581_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00048", + "split": "eval", + "split_group_id": "task_00048__episode_000084", + "recording_id": null, + "episode_id": 84, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. contact\nC. transfer\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
contact" + } + ], + "images": [ + "images/task_00049_5_1392_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00049", + "split": "eval", + "split_group_id": "task_00049__episode_000005", + "recording_id": null, + "episode_id": 5, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. pre-approach\nC. hold and carry\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. hold and carry" + } + ], + "images": [ + "images/task_00049_51_539_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00049", + "split": "eval", + "split_group_id": "task_00049__episode_000051", + "recording_id": null, + "episode_id": 51, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00049_51_1797_camera_top_t-6.jpg", + "images/task_00049_51_1800_camera_top_t-3.jpg", + "images/task_00049_51_1803_camera_top_t0.jpg", + "images/task_00049_51_1806_camera_top_t+3.jpg", + "images/task_00049_51_1809_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00049", + "split": "eval", + "split_group_id": "task_00049__episode_000051", + "recording_id": null, + "episode_id": 51, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00049_51_3137_camera_top.jpg", + "images/task_00049_51_3140_camera_top.jpg", + "images/task_00049_51_3143_camera_top.jpg", + "images/task_00049_51_3146_camera_top.jpg", + "images/task_00049_51_3149_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00049", + "split": "eval", + "split_group_id": "task_00049__episode_000051", + "recording_id": null, + "episode_id": 51, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. transfer\nC. pre-approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
transfer" + } + ], + "images": [ + "images/task_00049_51_745_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00049", + "split": "eval", + "split_group_id": "task_00049__episode_000051", + "recording_id": null, + "episode_id": 51, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00049_51_1327_camera_top_t-6.jpg", + "images/task_00049_51_1330_camera_top_t-3.jpg", + "images/task_00049_51_1333_camera_top_t0.jpg", + "images/task_00049_51_1336_camera_top_t+3.jpg", + "images/task_00049_51_1339_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00049", + "split": "eval", + "split_group_id": "task_00049__episode_000051", + "recording_id": null, + "episode_id": 51, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. approach\nC. pre-approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
contact" + } + ], + "images": [ + "images/task_00049_51_2411_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00049", + "split": "eval", + "split_group_id": "task_00049__episode_000051", + "recording_id": null, + "episode_id": 51, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the bottom of the scene\nB. toward the left side of the scene\nC. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. toward the right side of the scene" + } + ], + "images": [ + "images/task_00049_51_1548_camera_top.jpg", + "images/task_00049_51_1553_camera_top.jpg", + "images/task_00049_51_1558_camera_top.jpg", + "images/task_00049_51_1563_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00049", + "split": "eval", + "split_group_id": "task_00049__episode_000051", + "recording_id": null, + "episode_id": 51, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/task_00049_51_2101_camera_top.jpg", + "images/task_00049_51_2245_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00049", + "split": "eval", + "split_group_id": "task_00049__episode_000051", + "recording_id": null, + "episode_id": 51, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00049_73_1108_camera_top.jpg", + "images/task_00049_73_1111_camera_top.jpg", + "images/task_00049_73_1114_camera_top.jpg", + "images/task_00049_73_1117_camera_top.jpg", + "images/task_00049_73_1120_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00049", + "split": "eval", + "split_group_id": "task_00049__episode_000073", + "recording_id": null, + "episode_id": 73, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_00049_90_3906_camera_top.jpg", + "images/task_00049_90_4520_camera_top.jpg", + "images/task_00049_90_2036_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00049", + "split": "eval", + "split_group_id": "task_00049__episode_000090", + "recording_id": null, + "episode_id": 90, + "camera": "camera_top", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_00049_114_2327_camera_top.jpg", + "images/task_00049_114_2839_camera_top.jpg", + "images/task_00049_114_1555_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00049", + "split": "eval", + "split_group_id": "task_00049__episode_000114", + "recording_id": null, + "episode_id": 114, + "camera": "camera_top", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/task_00050_10_4226_camera_top.jpg", + "images/task_00050_10_4292_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000010", + "recording_id": null, + "episode_id": 10, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nA. toward the bottom of the scene\nB. toward the top of the scene\nC. toward the left side of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. toward the bottom of the scene" + } + ], + "images": [ + "images/task_00050_10_4218_camera_top.jpg", + "images/task_00050_10_4223_camera_top.jpg", + "images/task_00050_10_4228_camera_top.jpg", + "images/task_00050_10_4233_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000010", + "recording_id": null, + "episode_id": 10, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nD. toward the top of the scene\nA. toward the left side of the scene\nB. toward the bottom of the scene\nC. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
toward the top of the scene" + } + ], + "images": [ + "images/task_00050_10_5673_camera_top.jpg", + "images/task_00050_10_5678_camera_top.jpg", + "images/task_00050_10_5683_camera_top.jpg", + "images/task_00050_10_5688_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000010", + "recording_id": null, + "episode_id": 10, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the left gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/task_00050_10_1301_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000010", + "recording_id": null, + "episode_id": 10, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/task_00050_10_4446_camera_top.jpg", + "images/task_00050_10_4246_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000010", + "recording_id": null, + "episode_id": 10, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nB. toward the bottom of the scene\nA. toward the left side of the scene\nC. toward the top of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. toward the bottom of the scene" + } + ], + "images": [ + "images/task_00050_10_4533_camera_top.jpg", + "images/task_00050_10_4538_camera_top.jpg", + "images/task_00050_10_4543_camera_top.jpg", + "images/task_00050_10_4548_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000010", + "recording_id": null, + "episode_id": 10, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00050_10_4508_camera_top.jpg", + "images/task_00050_10_4511_camera_top.jpg", + "images/task_00050_10_4514_camera_top.jpg", + "images/task_00050_10_4517_camera_top.jpg", + "images/task_00050_10_4520_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000010", + "recording_id": null, + "episode_id": 10, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00050_10_4952_camera_top_t-6.jpg", + "images/task_00050_10_4955_camera_top_t-3.jpg", + "images/task_00050_10_4958_camera_top_t0.jpg", + "images/task_00050_10_4961_camera_top_t+3.jpg", + "images/task_00050_10_4964_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000010", + "recording_id": null, + "episode_id": 10, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nB. toward the bottom of the scene\nA. toward the right side of the scene\nC. toward the top of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
toward the bottom of the scene" + } + ], + "images": [ + "images/task_00050_10_4226_camera_top.jpg", + "images/task_00050_10_4231_camera_top.jpg", + "images/task_00050_10_4236_camera_top.jpg", + "images/task_00050_10_4241_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000010", + "recording_id": null, + "episode_id": 10, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nC. toward the left side of the scene\nA. toward the top of the scene\nB. toward the right side of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. toward the left side of the scene" + } + ], + "images": [ + "images/task_00050_10_4382_camera_top.jpg", + "images/task_00050_10_4387_camera_top.jpg", + "images/task_00050_10_4392_camera_top.jpg", + "images/task_00050_10_4397_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000010", + "recording_id": null, + "episode_id": 10, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00050_10_4349_camera_top.jpg", + "images/task_00050_10_4352_camera_top.jpg", + "images/task_00050_10_4355_camera_top.jpg", + "images/task_00050_10_4358_camera_top.jpg", + "images/task_00050_10_4361_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000010", + "recording_id": null, + "episode_id": 10, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the left gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_00050_10_3340_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000010", + "recording_id": null, + "episode_id": 10, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_00050_10_4251_camera_top.jpg", + "images/task_00050_10_4580_camera_top.jpg", + "images/task_00050_10_4400_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000010", + "recording_id": null, + "episode_id": 10, + "camera": "camera_top", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nA. toward the top of the scene\nB. toward the bottom of the scene\nC. toward the left side of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. toward the top of the scene" + } + ], + "images": [ + "images/task_00050_10_4565_camera_top.jpg", + "images/task_00050_10_4570_camera_top.jpg", + "images/task_00050_10_4575_camera_top.jpg", + "images/task_00050_10_4580_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000010", + "recording_id": null, + "episode_id": 10, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nA. toward the left side of the scene\nB. toward the bottom of the scene\nC. toward the top of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
toward the left side of the scene" + } + ], + "images": [ + "images/task_00050_10_4386_camera_top.jpg", + "images/task_00050_10_4391_camera_top.jpg", + "images/task_00050_10_4396_camera_top.jpg", + "images/task_00050_10_4401_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000010", + "recording_id": null, + "episode_id": 10, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_00050_10_4382_camera_top.jpg", + "images/task_00050_10_4570_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000010", + "recording_id": null, + "episode_id": 10, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the left gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — contact established" + } + ], + "images": [ + "images/task_00050_10_4363_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000010", + "recording_id": null, + "episode_id": 10, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_00050_10_4572_camera_top.jpg", + "images/task_00050_10_4938_camera_top.jpg", + "images/task_00050_10_4526_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000010", + "recording_id": null, + "episode_id": 10, + "camera": "camera_top", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the left gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/task_00050_10_3080_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000010", + "recording_id": null, + "episode_id": 10, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00050_10_4380_camera_top.jpg", + "images/task_00050_10_4383_camera_top.jpg", + "images/task_00050_10_4386_camera_top.jpg", + "images/task_00050_10_4389_camera_top.jpg", + "images/task_00050_10_4392_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000010", + "recording_id": null, + "episode_id": 10, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the left gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/task_00050_10_4516_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000010", + "recording_id": null, + "episode_id": 10, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. approach\nC. pre-approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
pre-approach" + } + ], + "images": [ + "images/task_00050_17_3962_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000017", + "recording_id": null, + "episode_id": 17, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the left gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/task_00050_28_5740_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000028", + "recording_id": null, + "episode_id": 28, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nC. toward the top of the scene\nA. toward the bottom of the scene\nB. toward the right side of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
toward the top of the scene" + } + ], + "images": [ + "images/task_00050_28_5636_camera_top.jpg", + "images/task_00050_28_5641_camera_top.jpg", + "images/task_00050_28_5646_camera_top.jpg", + "images/task_00050_28_5651_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000028", + "recording_id": null, + "episode_id": 28, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00050_28_5775_camera_top.jpg", + "images/task_00050_28_5778_camera_top.jpg", + "images/task_00050_28_5781_camera_top.jpg", + "images/task_00050_28_5784_camera_top.jpg", + "images/task_00050_28_5787_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000028", + "recording_id": null, + "episode_id": 28, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the left gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — contact established" + } + ], + "images": [ + "images/task_00050_28_5705_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000028", + "recording_id": null, + "episode_id": 28, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. approach\nC. transfer\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. hold and carry" + } + ], + "images": [ + "images/task_00050_28_5503_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000028", + "recording_id": null, + "episode_id": 28, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nB. toward the right side of the scene\nA. toward the left side of the scene\nC. toward the bottom of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00050_28_5823_camera_top.jpg", + "images/task_00050_28_5828_camera_top.jpg", + "images/task_00050_28_5833_camera_top.jpg", + "images/task_00050_28_5838_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000028", + "recording_id": null, + "episode_id": 28, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00050_28_5494_camera_top.jpg", + "images/task_00050_28_5497_camera_top.jpg", + "images/task_00050_28_5500_camera_top.jpg", + "images/task_00050_28_5503_camera_top.jpg", + "images/task_00050_28_5506_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000028", + "recording_id": null, + "episode_id": 28, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00050_28_5647_camera_top.jpg", + "images/task_00050_28_5650_camera_top.jpg", + "images/task_00050_28_5653_camera_top.jpg", + "images/task_00050_28_5656_camera_top.jpg", + "images/task_00050_28_5659_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000028", + "recording_id": null, + "episode_id": 28, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. transfer\nC. contact\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. hold and carry" + } + ], + "images": [ + "images/task_00050_28_5578_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000028", + "recording_id": null, + "episode_id": 28, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the left gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — contact established" + } + ], + "images": [ + "images/task_00050_28_5315_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000028", + "recording_id": null, + "episode_id": 28, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00050_28_5485_camera_top.jpg", + "images/task_00050_28_5488_camera_top.jpg", + "images/task_00050_28_5491_camera_top.jpg", + "images/task_00050_28_5494_camera_top.jpg", + "images/task_00050_28_5497_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000028", + "recording_id": null, + "episode_id": 28, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/task_00050_28_5789_camera_top.jpg", + "images/task_00050_28_5617_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000028", + "recording_id": null, + "episode_id": 28, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00050_28_5297_camera_top.jpg", + "images/task_00050_28_5300_camera_top.jpg", + "images/task_00050_28_5303_camera_top.jpg", + "images/task_00050_28_5306_camera_top.jpg", + "images/task_00050_28_5309_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000028", + "recording_id": null, + "episode_id": 28, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/task_00050_28_5256_camera_top.jpg", + "images/task_00050_28_5452_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000028", + "recording_id": null, + "episode_id": 28, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nD. toward the bottom of the scene\nA. toward the left side of the scene\nB. toward the right side of the scene\nC. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. toward the bottom of the scene" + } + ], + "images": [ + "images/task_00050_28_5098_camera_top.jpg", + "images/task_00050_28_5103_camera_top.jpg", + "images/task_00050_28_5108_camera_top.jpg", + "images/task_00050_28_5113_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000028", + "recording_id": null, + "episode_id": 28, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/task_00050_28_5440_camera_top.jpg", + "images/task_00050_28_5569_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000028", + "recording_id": null, + "episode_id": 28, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00050_28_5883_camera_top.jpg", + "images/task_00050_28_5886_camera_top.jpg", + "images/task_00050_28_5889_camera_top.jpg", + "images/task_00050_28_5892_camera_top.jpg", + "images/task_00050_28_5895_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000028", + "recording_id": null, + "episode_id": 28, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00050_28_5207_camera_top.jpg", + "images/task_00050_28_5210_camera_top.jpg", + "images/task_00050_28_5213_camera_top.jpg", + "images/task_00050_28_5216_camera_top.jpg", + "images/task_00050_28_5219_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000028", + "recording_id": null, + "episode_id": 28, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00050_28_5489_camera_top.jpg", + "images/task_00050_28_5492_camera_top.jpg", + "images/task_00050_28_5495_camera_top.jpg", + "images/task_00050_28_5498_camera_top.jpg", + "images/task_00050_28_5501_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000028", + "recording_id": null, + "episode_id": 28, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00050_28_5350_camera_top.jpg", + "images/task_00050_28_5353_camera_top.jpg", + "images/task_00050_28_5356_camera_top.jpg", + "images/task_00050_28_5359_camera_top.jpg", + "images/task_00050_28_5362_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000028", + "recording_id": null, + "episode_id": 28, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the left gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/task_00050_28_929_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000028", + "recording_id": null, + "episode_id": 28, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. release\nC. approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
contact" + } + ], + "images": [ + "images/task_00050_28_5450_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000028", + "recording_id": null, + "episode_id": 28, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_00050_28_6338_camera_top.jpg", + "images/task_00050_28_5488_camera_top.jpg", + "images/task_00050_28_5642_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000028", + "recording_id": null, + "episode_id": 28, + "camera": "camera_top", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nA. toward the top of the scene\nB. toward the left side of the scene\nC. toward the bottom of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
toward the top of the scene" + } + ], + "images": [ + "images/task_00050_28_5534_camera_top.jpg", + "images/task_00050_28_5539_camera_top.jpg", + "images/task_00050_28_5544_camera_top.jpg", + "images/task_00050_28_5549_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000028", + "recording_id": null, + "episode_id": 28, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nC. toward the left side of the scene\nA. toward the bottom of the scene\nB. toward the top of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. toward the left side of the scene" + } + ], + "images": [ + "images/task_00050_28_5300_camera_top.jpg", + "images/task_00050_28_5305_camera_top.jpg", + "images/task_00050_28_5310_camera_top.jpg", + "images/task_00050_28_5315_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000028", + "recording_id": null, + "episode_id": 28, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_00050_28_6251_camera_top.jpg", + "images/task_00050_28_5483_camera_top.jpg", + "images/task_00050_28_5210_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000028", + "recording_id": null, + "episode_id": 28, + "camera": "camera_top", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nC. toward the bottom of the scene\nA. toward the right side of the scene\nB. toward the top of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. toward the bottom of the scene" + } + ], + "images": [ + "images/task_00050_28_5104_camera_top.jpg", + "images/task_00050_28_5109_camera_top.jpg", + "images/task_00050_28_5114_camera_top.jpg", + "images/task_00050_28_5119_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000028", + "recording_id": null, + "episode_id": 28, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. release\nC. pre-approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
pre-approach" + } + ], + "images": [ + "images/task_00050_42_1475_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000042", + "recording_id": null, + "episode_id": 42, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. contact\nC. pre-approach\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. pre-approach" + } + ], + "images": [ + "images/task_00050_77_2904_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000077", + "recording_id": null, + "episode_id": 77, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. pre-approach\nC. release\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. release" + } + ], + "images": [ + "images/task_00050_87_3409_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000087", + "recording_id": null, + "episode_id": 87, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. 
Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_00050_87_772_camera_top.jpg", + "images/task_00050_87_706_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000087", + "recording_id": null, + "episode_id": 87, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00050_87_390_camera_top_t-6.jpg", + "images/task_00050_87_393_camera_top_t-3.jpg", + "images/task_00050_87_396_camera_top_t0.jpg", + "images/task_00050_87_399_camera_top_t+3.jpg", + "images/task_00050_87_402_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000087", + "recording_id": null, + "episode_id": 87, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_00050_87_772_camera_top.jpg", + "images/task_00050_87_718_camera_top.jpg", + "images/task_00050_87_2677_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000087", + "recording_id": null, + "episode_id": 87, + "camera": "camera_top", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. contact\nC. release\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. release" + } + ], + "images": [ + "images/task_00050_87_3757_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000087", + "recording_id": null, + "episode_id": 87, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_00050_87_1918_camera_top.jpg", + "images/task_00050_87_772_camera_top.jpg", + "images/task_00050_87_679_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000087", + "recording_id": null, + "episode_id": 87, + "camera": "camera_top", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_00050_87_772_camera_top.jpg", + "images/task_00050_87_501_camera_top.jpg", + "images/task_00050_87_3430_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000087", + "recording_id": null, + "episode_id": 87, + "camera": "camera_top", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. release\nC. approach\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. pre-approach" + } + ], + "images": [ + "images/task_00050_91_400_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000091", + "recording_id": null, + "episode_id": 91, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. pre-approach\nC. approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. pre-approach" + } + ], + "images": [ + "images/task_00050_109_86_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000109", + "recording_id": null, + "episode_id": 109, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. release\nC. pre-approach\nD. 
transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. approach" + } + ], + "images": [ + "images/task_00050_109_117_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000109", + "recording_id": null, + "episode_id": 109, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. release\nC. pre-approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. approach" + } + ], + "images": [ + "images/task_00050_109_118_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000109", + "recording_id": null, + "episode_id": 109, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. transfer\nC. approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. release" + } + ], + "images": [ + "images/task_00050_141_5553_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000141", + "recording_id": null, + "episode_id": 141, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. 
Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_00050_151_1648_camera_top.jpg", + "images/task_00050_151_1480_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000151", + "recording_id": null, + "episode_id": 151, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_00050_151_1959_camera_top.jpg", + "images/task_00050_151_1767_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000151", + "recording_id": null, + "episode_id": 151, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_00050_151_1006_camera_top.jpg", + "images/task_00050_151_4539_camera_top.jpg", + "images/task_00050_151_1759_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000151", + "recording_id": null, + "episode_id": 151, + "camera": "camera_top", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_00050_151_4207_camera_top.jpg", + "images/task_00050_151_1495_camera_top.jpg", + "images/task_00050_151_1910_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000151", + "recording_id": null, + "episode_id": 151, + "camera": "camera_top", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. release\nC. approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. transfer" + } + ], + "images": [ + "images/task_00050_151_1951_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000151", + "recording_id": null, + "episode_id": 151, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. 
Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00050_151_1191_camera_top_t-6.jpg", + "images/task_00050_151_1194_camera_top_t-3.jpg", + "images/task_00050_151_1197_camera_top_t0.jpg", + "images/task_00050_151_1200_camera_top_t+3.jpg", + "images/task_00050_151_1203_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000151", + "recording_id": null, + "episode_id": 151, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_00050_151_2671_camera_top.jpg", + "images/task_00050_151_1955_camera_top.jpg", + "images/task_00050_151_133_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000151", + "recording_id": null, + "episode_id": 151, + "camera": "camera_top", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. transfer\nC. approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
pre-approach" + } + ], + "images": [ + "images/task_00050_151_392_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00050", + "split": "eval", + "split_group_id": "task_00050__episode_000151", + "recording_id": null, + "episode_id": 151, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_00051_0_414_camera_top.jpg", + "images/task_00051_0_475_camera_top.jpg", + "images/task_00051_0_1402_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00051", + "split": "eval", + "split_group_id": "task_00051__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
both arms are idle" + } + ], + "images": [ + "images/task_00051_0_1576_camera_top.jpg", + "images/task_00051_0_1579_camera_top.jpg", + "images/task_00051_0_1582_camera_top.jpg", + "images/task_00051_0_1585_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00051", + "split": "eval", + "split_group_id": "task_00051__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. release\nC. contact\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. contact" + } + ], + "images": [ + "images/task_00051_0_665_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00051", + "split": "eval", + "split_group_id": "task_00051__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
only the right arm is active" + } + ], + "images": [ + "images/task_00051_0_188_camera_top.jpg", + "images/task_00051_0_191_camera_top.jpg", + "images/task_00051_0_194_camera_top.jpg", + "images/task_00051_0_197_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00051", + "split": "eval", + "split_group_id": "task_00051__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the left gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/task_00051_0_756_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00051", + "split": "eval", + "split_group_id": "task_00051__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. contact\nC. hold and carry\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. approach" + } + ], + "images": [ + "images/task_00051_0_917_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00051", + "split": "eval", + "split_group_id": "task_00051__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the left gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/task_00051_0_1626_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00051", + "split": "eval", + "split_group_id": "task_00051__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. only the right arm is active" + } + ], + "images": [ + "images/task_00051_0_1470_camera_top.jpg", + "images/task_00051_0_1473_camera_top.jpg", + "images/task_00051_0_1476_camera_top.jpg", + "images/task_00051_0_1479_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00051", + "split": "eval", + "split_group_id": "task_00051__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. contact\nC. release\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
contact" + } + ], + "images": [ + "images/task_00051_0_664_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00051", + "split": "eval", + "split_group_id": "task_00051__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_00051_31_1551_camera_top.jpg", + "images/task_00051_31_1466_camera_top.jpg", + "images/task_00051_31_1216_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00051", + "split": "eval", + "split_group_id": "task_00051__episode_000031", + "recording_id": null, + "episode_id": 31, + "camera": "camera_top", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00051_31_1109_camera_top.jpg", + "images/task_00051_31_1112_camera_top.jpg", + "images/task_00051_31_1115_camera_top.jpg", + "images/task_00051_31_1118_camera_top.jpg", + "images/task_00051_31_1121_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00051", + "split": "eval", + "split_group_id": "task_00051__episode_000031", + "recording_id": null, + "episode_id": 31, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_00051_31_1533_camera_top.jpg", + "images/task_00051_31_1389_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00051", + "split": "eval", + "split_group_id": "task_00051__episode_000031", + "recording_id": null, + "episode_id": 31, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
only the left arm is active" + } + ], + "images": [ + "images/task_00051_31_309_camera_top.jpg", + "images/task_00051_31_312_camera_top.jpg", + "images/task_00051_31_315_camera_top.jpg", + "images/task_00051_31_318_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00051", + "split": "eval", + "split_group_id": "task_00051__episode_000031", + "recording_id": null, + "episode_id": 31, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. both arms are active" + } + ], + "images": [ + "images/task_00051_31_921_camera_top.jpg", + "images/task_00051_31_924_camera_top.jpg", + "images/task_00051_31_927_camera_top.jpg", + "images/task_00051_31_930_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00051", + "split": "eval", + "split_group_id": "task_00051__episode_000031", + "recording_id": null, + "episode_id": 31, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
both arms are idle" + } + ], + "images": [ + "images/task_00051_31_1251_camera_top.jpg", + "images/task_00051_31_1254_camera_top.jpg", + "images/task_00051_31_1257_camera_top.jpg", + "images/task_00051_31_1260_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00051", + "split": "eval", + "split_group_id": "task_00051__episode_000031", + "recording_id": null, + "episode_id": 31, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nA. toward the right side of the scene\nB. toward the top of the scene\nC. toward the bottom of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. toward the right side of the scene" + } + ], + "images": [ + "images/task_00051_31_918_camera_top.jpg", + "images/task_00051_31_923_camera_top.jpg", + "images/task_00051_31_928_camera_top.jpg", + "images/task_00051_31_933_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00051", + "split": "eval", + "split_group_id": "task_00051__episode_000031", + "recording_id": null, + "episode_id": 31, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the left side of the scene\nA. toward the bottom of the scene\nC. toward the right side of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
toward the left side of the scene" + } + ], + "images": [ + "images/task_00051_31_1084_camera_top.jpg", + "images/task_00051_31_1089_camera_top.jpg", + "images/task_00051_31_1094_camera_top.jpg", + "images/task_00051_31_1099_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00051", + "split": "eval", + "split_group_id": "task_00051__episode_000031", + "recording_id": null, + "episode_id": 31, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. only the left arm is active" + } + ], + "images": [ + "images/task_00051_31_450_camera_top.jpg", + "images/task_00051_31_453_camera_top.jpg", + "images/task_00051_31_456_camera_top.jpg", + "images/task_00051_31_459_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00051", + "split": "eval", + "split_group_id": "task_00051__episode_000031", + "recording_id": null, + "episode_id": 31, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00051_31_923_camera_top.jpg", + "images/task_00051_31_926_camera_top.jpg", + "images/task_00051_31_929_camera_top.jpg", + "images/task_00051_31_932_camera_top.jpg", + "images/task_00051_31_935_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00051", + "split": "eval", + "split_group_id": "task_00051__episode_000031", + "recording_id": null, + "episode_id": 31, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. both arms are active" + } + ], + "images": [ + "images/task_00051_31_1506_camera_top.jpg", + "images/task_00051_31_1509_camera_top.jpg", + "images/task_00051_31_1512_camera_top.jpg", + "images/task_00051_31_1515_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00051", + "split": "eval", + "split_group_id": "task_00051__episode_000031", + "recording_id": null, + "episode_id": 31, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_00051_60_1484_camera_top.jpg", + "images/task_00051_60_561_camera_top.jpg", + "images/task_00051_60_988_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00051", + "split": "eval", + "split_group_id": "task_00051__episode_000060", + "recording_id": null, + "episode_id": 60, + "camera": "camera_top", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. both arms are active" + } + ], + "images": [ + "images/task_00051_60_1830_camera_top.jpg", + "images/task_00051_60_1833_camera_top.jpg", + "images/task_00051_60_1836_camera_top.jpg", + "images/task_00051_60_1839_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00051", + "split": "eval", + "split_group_id": "task_00051__episode_000060", + "recording_id": null, + "episode_id": 60, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
only the left arm is active" + } + ], + "images": [ + "images/task_00051_60_527_camera_top.jpg", + "images/task_00051_60_530_camera_top.jpg", + "images/task_00051_60_533_camera_top.jpg", + "images/task_00051_60_536_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00051", + "split": "eval", + "split_group_id": "task_00051__episode_000060", + "recording_id": null, + "episode_id": 60, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_00051_60_448_camera_top.jpg", + "images/task_00051_60_585_camera_top.jpg", + "images/task_00051_60_698_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00051", + "split": "eval", + "split_group_id": "task_00051__episode_000060", + "recording_id": null, + "episode_id": 60, + "camera": "camera_top", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_00051_60_579_camera_top.jpg", + "images/task_00051_60_1045_camera_top.jpg", + "images/task_00051_60_547_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00051", + "split": "eval", + "split_group_id": "task_00051__episode_000060", + "recording_id": null, + "episode_id": 60, + "camera": "camera_top", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00051_60_898_camera_top.jpg", + "images/task_00051_60_901_camera_top.jpg", + "images/task_00051_60_904_camera_top.jpg", + "images/task_00051_60_907_camera_top.jpg", + "images/task_00051_60_910_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00051", + "split": "eval", + "split_group_id": "task_00051__episode_000060", + "recording_id": null, + "episode_id": 60, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_00051_60_686_camera_top.jpg", + "images/task_00051_60_578_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00051", + "split": "eval", + "split_group_id": "task_00051__episode_000060", + "recording_id": null, + "episode_id": 60, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. 
only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. only the right arm is active" + } + ], + "images": [ + "images/task_00051_60_244_camera_top.jpg", + "images/task_00051_60_247_camera_top.jpg", + "images/task_00051_60_250_camera_top.jpg", + "images/task_00051_60_253_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00051", + "split": "eval", + "split_group_id": "task_00051__episode_000060", + "recording_id": null, + "episode_id": 60, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nC. toward the left side of the scene\nA. toward the bottom of the scene\nB. toward the top of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. toward the left side of the scene" + } + ], + "images": [ + "images/task_00051_60_518_camera_top.jpg", + "images/task_00051_60_523_camera_top.jpg", + "images/task_00051_60_528_camera_top.jpg", + "images/task_00051_60_533_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00051", + "split": "eval", + "split_group_id": "task_00051__episode_000060", + "recording_id": null, + "episode_id": 60, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the top of the scene\nB. toward the bottom of the scene\nC. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00051_105_1390_camera_top.jpg", + "images/task_00051_105_1395_camera_top.jpg", + "images/task_00051_105_1400_camera_top.jpg", + "images/task_00051_105_1405_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00051", + "split": "eval", + "split_group_id": "task_00051__episode_000105", + "recording_id": null, + "episode_id": 105, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. only the left arm is active" + } + ], + "images": [ + "images/task_00051_105_696_camera_top.jpg", + "images/task_00051_105_699_camera_top.jpg", + "images/task_00051_105_702_camera_top.jpg", + "images/task_00051_105_705_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00051", + "split": "eval", + "split_group_id": "task_00051__episode_000105", + "recording_id": null, + "episode_id": 105, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nC. toward the bottom of the scene\nA. toward the left side of the scene\nB. toward the right side of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
toward the bottom of the scene" + } + ], + "images": [ + "images/task_00051_105_830_camera_top.jpg", + "images/task_00051_105_835_camera_top.jpg", + "images/task_00051_105_840_camera_top.jpg", + "images/task_00051_105_845_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00051", + "split": "eval", + "split_group_id": "task_00051__episode_000105", + "recording_id": null, + "episode_id": 105, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_00051_105_701_camera_top.jpg", + "images/task_00051_105_646_camera_top.jpg", + "images/task_00051_105_1076_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00051", + "split": "eval", + "split_group_id": "task_00051__episode_000105", + "recording_id": null, + "episode_id": 105, + "camera": "camera_top", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
only the left arm is active" + } + ], + "images": [ + "images/task_00051_105_602_camera_top.jpg", + "images/task_00051_105_605_camera_top.jpg", + "images/task_00051_105_608_camera_top.jpg", + "images/task_00051_105_611_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00051", + "split": "eval", + "split_group_id": "task_00051__episode_000105", + "recording_id": null, + "episode_id": 105, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_00051_105_711_camera_top.jpg", + "images/task_00051_105_746_camera_top.jpg", + "images/task_00051_105_643_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00051", + "split": "eval", + "split_group_id": "task_00051__episode_000105", + "recording_id": null, + "episode_id": 105, + "camera": "camera_top", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00051_105_848_camera_top_t-6.jpg", + "images/task_00051_105_851_camera_top_t-3.jpg", + "images/task_00051_105_854_camera_top_t0.jpg", + "images/task_00051_105_857_camera_top_t+3.jpg", + "images/task_00051_105_860_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00051", + "split": "eval", + "split_group_id": "task_00051__episode_000105", + "recording_id": null, + "episode_id": 105, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_00051_105_686_camera_top.jpg", + "images/task_00051_105_614_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00051", + "split": "eval", + "split_group_id": "task_00051__episode_000105", + "recording_id": null, + "episode_id": 105, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
both arms are active" + } + ], + "images": [ + "images/task_00051_105_1351_camera_top.jpg", + "images/task_00051_105_1354_camera_top.jpg", + "images/task_00051_105_1357_camera_top.jpg", + "images/task_00051_105_1360_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00051", + "split": "eval", + "split_group_id": "task_00051__episode_000105", + "recording_id": null, + "episode_id": 105, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. transfer\nC. hold and carry\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. hold and carry" + } + ], + "images": [ + "images/task_00051_105_650_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00051", + "split": "eval", + "split_group_id": "task_00051__episode_000105", + "recording_id": null, + "episode_id": 105, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
only the left arm is active" + } + ], + "images": [ + "images/task_00051_117_1026_camera_top.jpg", + "images/task_00051_117_1029_camera_top.jpg", + "images/task_00051_117_1032_camera_top.jpg", + "images/task_00051_117_1035_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00051", + "split": "eval", + "split_group_id": "task_00051__episode_000117", + "recording_id": null, + "episode_id": 117, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nC. toward the top of the scene\nA. toward the left side of the scene\nB. toward the right side of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. toward the top of the scene" + } + ], + "images": [ + "images/task_00051_117_1270_camera_top.jpg", + "images/task_00051_117_1275_camera_top.jpg", + "images/task_00051_117_1280_camera_top.jpg", + "images/task_00051_117_1285_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00051", + "split": "eval", + "split_group_id": "task_00051__episode_000117", + "recording_id": null, + "episode_id": 117, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00051_117_1442_camera_top_t-6.jpg", + "images/task_00051_117_1445_camera_top_t-3.jpg", + "images/task_00051_117_1448_camera_top_t0.jpg", + "images/task_00051_117_1451_camera_top_t+3.jpg", + "images/task_00051_117_1454_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00051", + "split": "eval", + "split_group_id": "task_00051__episode_000117", + "recording_id": null, + "episode_id": 117, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nC. toward the right side of the scene\nA. toward the bottom of the scene\nB. toward the left side of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. toward the right side of the scene" + } + ], + "images": [ + "images/task_00051_117_1093_camera_top.jpg", + "images/task_00051_117_1098_camera_top.jpg", + "images/task_00051_117_1103_camera_top.jpg", + "images/task_00051_117_1108_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00051", + "split": "eval", + "split_group_id": "task_00051__episode_000117", + "recording_id": null, + "episode_id": 117, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/task_00051_117_1226_camera_top.jpg", + "images/task_00051_117_1118_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00051", + "split": "eval", + "split_group_id": "task_00051__episode_000117", + "recording_id": null, + "episode_id": 117, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. only the right arm is active" + } + ], + "images": [ + "images/task_00051_117_205_camera_top.jpg", + "images/task_00051_117_208_camera_top.jpg", + "images/task_00051_117_211_camera_top.jpg", + "images/task_00051_117_214_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00051", + "split": "eval", + "split_group_id": "task_00051__episode_000117", + "recording_id": null, + "episode_id": 117, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00051_117_1173_camera_top.jpg", + "images/task_00051_117_1176_camera_top.jpg", + "images/task_00051_117_1179_camera_top.jpg", + "images/task_00051_117_1182_camera_top.jpg", + "images/task_00051_117_1185_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00051", + "split": "eval", + "split_group_id": "task_00051__episode_000117", + "recording_id": null, + "episode_id": 117, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the left gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_00051_117_562_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00051", + "split": "eval", + "split_group_id": "task_00051__episode_000117", + "recording_id": null, + "episode_id": 117, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
both arms are active" + } + ], + "images": [ + "images/task_00051_117_1391_camera_top.jpg", + "images/task_00051_117_1394_camera_top.jpg", + "images/task_00051_117_1397_camera_top.jpg", + "images/task_00051_117_1400_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00051", + "split": "eval", + "split_group_id": "task_00051__episode_000117", + "recording_id": null, + "episode_id": 117, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_00053_44_1389_camera_top.jpg", + "images/task_00053_44_170_camera_top.jpg", + "images/task_00053_44_290_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00053", + "split": "eval", + "split_group_id": "task_00053__episode_000044", + "recording_id": null, + "episode_id": 44, + "camera": "camera_top", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the right side of the scene\nA. toward the top of the scene\nC. toward the left side of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00053_52_177_camera_top.jpg", + "images/task_00053_52_182_camera_top.jpg", + "images/task_00053_52_187_camera_top.jpg", + "images/task_00053_52_192_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00053", + "split": "eval", + "split_group_id": "task_00053__episode_000052", + "recording_id": null, + "episode_id": 52, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the left side of the scene\nB. toward the top of the scene\nC. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. toward the right side of the scene" + } + ], + "images": [ + "images/task_00053_68_805_camera_top.jpg", + "images/task_00053_68_810_camera_top.jpg", + "images/task_00053_68_815_camera_top.jpg", + "images/task_00053_68_820_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00053", + "split": "eval", + "split_group_id": "task_00053__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_00053_78_198_camera_top.jpg", + "images/task_00053_78_243_camera_top.jpg", + "images/task_00053_78_274_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00053", + "split": "eval", + "split_group_id": "task_00053__episode_000078", + "recording_id": null, + "episode_id": 78, + "camera": "camera_top", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00053_94_1058_camera_top.jpg", + "images/task_00053_94_1061_camera_top.jpg", + "images/task_00053_94_1064_camera_top.jpg", + "images/task_00053_94_1067_camera_top.jpg", + "images/task_00053_94_1070_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00053", + "split": "eval", + "split_group_id": "task_00053__episode_000094", + "recording_id": null, + "episode_id": 94, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nC. toward the right side of the scene\nA. toward the top of the scene\nB. toward the left side of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00053_94_426_camera_top.jpg", + "images/task_00053_94_431_camera_top.jpg", + "images/task_00053_94_436_camera_top.jpg", + "images/task_00053_94_441_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00053", + "split": "eval", + "split_group_id": "task_00053__episode_000094", + "recording_id": null, + "episode_id": 94, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00053_94_624_camera_top.jpg", + "images/task_00053_94_627_camera_top.jpg", + "images/task_00053_94_630_camera_top.jpg", + "images/task_00053_94_633_camera_top.jpg", + "images/task_00053_94_636_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00053", + "split": "eval", + "split_group_id": "task_00053__episode_000094", + "recording_id": null, + "episode_id": 94, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the top of the scene\nB. toward the left side of the scene\nC. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00053_94_808_camera_top.jpg", + "images/task_00053_94_813_camera_top.jpg", + "images/task_00053_94_818_camera_top.jpg", + "images/task_00053_94_823_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00053", + "split": "eval", + "split_group_id": "task_00053__episode_000094", + "recording_id": null, + "episode_id": 94, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. transfer\nC. contact\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. approach" + } + ], + "images": [ + "images/task_00053_94_1008_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00053", + "split": "eval", + "split_group_id": "task_00053__episode_000094", + "recording_id": null, + "episode_id": 94, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00053_94_944_camera_top.jpg", + "images/task_00053_94_947_camera_top.jpg", + "images/task_00053_94_950_camera_top.jpg", + "images/task_00053_94_953_camera_top.jpg", + "images/task_00053_94_956_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00053", + "split": "eval", + "split_group_id": "task_00053__episode_000094", + "recording_id": null, + "episode_id": 94, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the right side of the scene\nA. toward the bottom of the scene\nC. toward the top of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. toward the right side of the scene" + } + ], + "images": [ + "images/task_00053_94_429_camera_top.jpg", + "images/task_00053_94_434_camera_top.jpg", + "images/task_00053_94_439_camera_top.jpg", + "images/task_00053_94_444_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00053", + "split": "eval", + "split_group_id": "task_00053__episode_000094", + "recording_id": null, + "episode_id": 94, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00054_35_455_camera_top.jpg", + "images/task_00054_35_458_camera_top.jpg", + "images/task_00054_35_461_camera_top.jpg", + "images/task_00054_35_464_camera_top.jpg", + "images/task_00054_35_467_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00054", + "split": "eval", + "split_group_id": "task_00054__episode_000035", + "recording_id": null, + "episode_id": 35, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nD. toward the top of the scene\nA. toward the right side of the scene\nB. toward the bottom of the scene\nC. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. toward the top of the scene" + } + ], + "images": [ + "images/task_00054_66_162_camera_top.jpg", + "images/task_00054_66_167_camera_top.jpg", + "images/task_00054_66_172_camera_top.jpg", + "images/task_00054_66_177_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00054", + "split": "eval", + "split_group_id": "task_00054__episode_000066", + "recording_id": null, + "episode_id": 66, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. transfer\nC. contact\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
hold and carry" + } + ], + "images": [ + "images/task_00054_88_119_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00054", + "split": "eval", + "split_group_id": "task_00054__episode_000088", + "recording_id": null, + "episode_id": 88, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_00054_121_159_camera_top.jpg", + "images/task_00054_121_484_camera_top.jpg", + "images/task_00054_121_786_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00054", + "split": "eval", + "split_group_id": "task_00054__episode_000121", + "recording_id": null, + "episode_id": 121, + "camera": "camera_top", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. transfer\nC. pre-approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. transfer" + } + ], + "images": [ + "images/task_00055_8_3832_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00055", + "split": "eval", + "split_group_id": "task_00055__episode_000008", + "recording_id": null, + "episode_id": 8, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_00055_25_1979_camera_top.jpg", + "images/task_00055_25_890_camera_top.jpg", + "images/task_00055_25_3709_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00055", + "split": "eval", + "split_group_id": "task_00055__episode_000025", + "recording_id": null, + "episode_id": 25, + "camera": "camera_top", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. pre-approach\nC. approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. pre-approach" + } + ], + "images": [ + "images/task_00055_36_27_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00055", + "split": "eval", + "split_group_id": "task_00055__episode_000036", + "recording_id": null, + "episode_id": 36, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. release\nC. approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. release" + } + ], + "images": [ + "images/task_00055_39_4491_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00055", + "split": "eval", + "split_group_id": "task_00055__episode_000039", + "recording_id": null, + "episode_id": 39, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. transfer\nC. release\nD. pre-approach\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "A. approach" + } + ], + "images": [ + "images/task_00055_82_100_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00055", + "split": "eval", + "split_group_id": "task_00055__episode_000082", + "recording_id": null, + "episode_id": 82, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. approach\nC. pre-approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. hold and carry" + } + ], + "images": [ + "images/task_00055_87_829_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00055", + "split": "eval", + "split_group_id": "task_00055__episode_000087", + "recording_id": null, + "episode_id": 87, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. hold and carry\nC. transfer\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. contact" + } + ], + "images": [ + "images/task_00055_87_1396_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00055", + "split": "eval", + "split_group_id": "task_00055__episode_000087", + "recording_id": null, + "episode_id": 87, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. approach\nC. transfer\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
release" + } + ], + "images": [ + "images/task_00055_87_1936_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00055", + "split": "eval", + "split_group_id": "task_00055__episode_000087", + "recording_id": null, + "episode_id": 87, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00055_87_1799_camera_top_t-6.jpg", + "images/task_00055_87_1802_camera_top_t-3.jpg", + "images/task_00055_87_1805_camera_top_t0.jpg", + "images/task_00055_87_1808_camera_top_t+3.jpg", + "images/task_00055_87_1811_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00055", + "split": "eval", + "split_group_id": "task_00055__episode_000087", + "recording_id": null, + "episode_id": 87, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00055_87_906_camera_top.jpg", + "images/task_00055_87_909_camera_top.jpg", + "images/task_00055_87_912_camera_top.jpg", + "images/task_00055_87_915_camera_top.jpg", + "images/task_00055_87_918_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00055", + "split": "eval", + "split_group_id": "task_00055__episode_000087", + "recording_id": null, + "episode_id": 87, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00055_87_791_camera_top.jpg", + "images/task_00055_87_794_camera_top.jpg", + "images/task_00055_87_797_camera_top.jpg", + "images/task_00055_87_800_camera_top.jpg", + "images/task_00055_87_803_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00055", + "split": "eval", + "split_group_id": "task_00055__episode_000087", + "recording_id": null, + "episode_id": 87, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_00055_87_1759_camera_top.jpg", + "images/task_00055_87_1884_camera_top.jpg", + "images/task_00055_87_615_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00055", + "split": "eval", + "split_group_id": "task_00055__episode_000087", + "recording_id": null, + "episode_id": 87, + "camera": "camera_top", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00055_87_555_camera_top.jpg", + "images/task_00055_87_558_camera_top.jpg", + "images/task_00055_87_561_camera_top.jpg", + "images/task_00055_87_564_camera_top.jpg", + "images/task_00055_87_567_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00055", + "split": "eval", + "split_group_id": "task_00055__episode_000087", + "recording_id": null, + "episode_id": 87, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00055_102_1442_camera_top_t-6.jpg", + "images/task_00055_102_1445_camera_top_t-3.jpg", + "images/task_00055_102_1448_camera_top_t0.jpg", + "images/task_00055_102_1451_camera_top_t+3.jpg", + "images/task_00055_102_1454_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00055", + "split": "eval", + "split_group_id": "task_00055__episode_000102", + "recording_id": null, + "episode_id": 102, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_00055_102_746_camera_top.jpg", + "images/task_00055_102_577_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00055", + "split": "eval", + "split_group_id": "task_00055__episode_000102", + "recording_id": null, + "episode_id": 102, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00055_102_1294_camera_top.jpg", + "images/task_00055_102_1297_camera_top.jpg", + "images/task_00055_102_1300_camera_top.jpg", + "images/task_00055_102_1303_camera_top.jpg", + "images/task_00055_102_1306_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00055", + "split": "eval", + "split_group_id": "task_00055__episode_000102", + "recording_id": null, + "episode_id": 102, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_00055_102_761_camera_top.jpg", + "images/task_00055_102_522_camera_top.jpg", + "images/task_00055_102_992_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00055", + "split": "eval", + "split_group_id": "task_00055__episode_000102", + "recording_id": null, + "episode_id": 102, + "camera": "camera_top", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_00055_102_706_camera_top.jpg", + "images/task_00055_102_996_camera_top.jpg", + "images/task_00055_102_421_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00055", + "split": "eval", + "split_group_id": "task_00055__episode_000102", + "recording_id": null, + "episode_id": 102, + "camera": "camera_top", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_00055_102_1012_camera_top.jpg", + "images/task_00055_102_732_camera_top.jpg", + "images/task_00055_102_1305_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00055", + "split": "eval", + "split_group_id": "task_00055__episode_000102", + "recording_id": null, + "episode_id": 102, + "camera": "camera_top", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00055_102_439_camera_top.jpg", + "images/task_00055_102_442_camera_top.jpg", + "images/task_00055_102_445_camera_top.jpg", + "images/task_00055_102_448_camera_top.jpg", + "images/task_00055_102_451_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00055", + "split": "eval", + "split_group_id": "task_00055__episode_000102", + "recording_id": null, + "episode_id": 102, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00055_107_475_camera_top.jpg", + "images/task_00055_107_478_camera_top.jpg", + "images/task_00055_107_481_camera_top.jpg", + "images/task_00055_107_484_camera_top.jpg", + "images/task_00055_107_487_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00055", + "split": "eval", + "split_group_id": "task_00055__episode_000107", + "recording_id": null, + "episode_id": 107, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_00055_107_400_camera_top.jpg", + "images/task_00055_107_1774_camera_top.jpg", + "images/task_00055_107_912_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00055", + "split": "eval", + "split_group_id": "task_00055__episode_000107", + "recording_id": null, + "episode_id": 107, + "camera": "camera_top", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00055_107_1595_camera_top.jpg", + "images/task_00055_107_1598_camera_top.jpg", + "images/task_00055_107_1601_camera_top.jpg", + "images/task_00055_107_1604_camera_top.jpg", + "images/task_00055_107_1607_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00055", + "split": "eval", + "split_group_id": "task_00055__episode_000107", + "recording_id": null, + "episode_id": 107, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_00055_107_504_camera_top.jpg", + "images/task_00055_107_198_camera_top.jpg", + "images/task_00055_107_911_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00055", + "split": "eval", + "split_group_id": "task_00055__episode_000107", + "recording_id": null, + "episode_id": 107, + "camera": "camera_top", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nC. toward the right side of the scene\nA. toward the top of the scene\nB. toward the left side of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. toward the right side of the scene" + } + ], + "images": [ + "images/task_00055_107_1796_camera_top.jpg", + "images/task_00055_107_1801_camera_top.jpg", + "images/task_00055_107_1806_camera_top.jpg", + "images/task_00055_107_1811_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00055", + "split": "eval", + "split_group_id": "task_00055__episode_000107", + "recording_id": null, + "episode_id": 107, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. approach\nC. release\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
release" + } + ], + "images": [ + "images/task_00055_107_1934_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00055", + "split": "eval", + "split_group_id": "task_00055__episode_000107", + "recording_id": null, + "episode_id": 107, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nA. toward the top of the scene\nB. toward the left side of the scene\nC. toward the bottom of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. toward the top of the scene" + } + ], + "images": [ + "images/task_00055_107_1433_camera_top.jpg", + "images/task_00055_107_1438_camera_top.jpg", + "images/task_00055_107_1443_camera_top.jpg", + "images/task_00055_107_1448_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00055", + "split": "eval", + "split_group_id": "task_00055__episode_000107", + "recording_id": null, + "episode_id": 107, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the left gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — contact established" + } + ], + "images": [ + "images/task_00055_113_425_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00055", + "split": "eval", + "split_group_id": "task_00055__episode_000113", + "recording_id": null, + "episode_id": 113, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/task_00055_113_192_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00055", + "split": "eval", + "split_group_id": "task_00055__episode_000113", + "recording_id": null, + "episode_id": 113, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/task_00055_113_327_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00055", + "split": "eval", + "split_group_id": "task_00055__episode_000113", + "recording_id": null, + "episode_id": 113, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. transfer\nC. pre-approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
contact" + } + ], + "images": [ + "images/task_00055_113_410_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00055", + "split": "eval", + "split_group_id": "task_00055__episode_000113", + "recording_id": null, + "episode_id": 113, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00055_113_998_camera_top_t-6.jpg", + "images/task_00055_113_1001_camera_top_t-3.jpg", + "images/task_00055_113_1004_camera_top_t0.jpg", + "images/task_00055_113_1007_camera_top_t+3.jpg", + "images/task_00055_113_1010_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00055", + "split": "eval", + "split_group_id": "task_00055__episode_000113", + "recording_id": null, + "episode_id": 113, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00055_113_859_camera_top.jpg", + "images/task_00055_113_862_camera_top.jpg", + "images/task_00055_113_865_camera_top.jpg", + "images/task_00055_113_868_camera_top.jpg", + "images/task_00055_113_871_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00055", + "split": "eval", + "split_group_id": "task_00055__episode_000113", + "recording_id": null, + "episode_id": 113, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_00055_113_623_camera_top.jpg", + "images/task_00055_113_365_camera_top.jpg", + "images/task_00055_113_1440_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00055", + "split": "eval", + "split_group_id": "task_00055__episode_000113", + "recording_id": null, + "episode_id": 113, + "camera": "camera_top", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_00055_113_1356_camera_top.jpg", + "images/task_00055_113_1247_camera_top.jpg", + "images/task_00055_113_408_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00055", + "split": "eval", + "split_group_id": "task_00055__episode_000113", + "recording_id": null, + "episode_id": 113, + "camera": "camera_top", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. approach\nC. pre-approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. approach" + } + ], + "images": [ + "images/task_00055_127_343_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00055", + "split": "eval", + "split_group_id": "task_00055__episode_000127", + "recording_id": null, + "episode_id": 127, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. pre-approach\nC. release\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. release" + } + ], + "images": [ + "images/task_00056_34_719_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00056", + "split": "eval", + "split_group_id": "task_00056__episode_000034", + "recording_id": null, + "episode_id": 34, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. 
Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00056_78_588_camera_top.jpg", + "images/task_00056_78_591_camera_top.jpg", + "images/task_00056_78_594_camera_top.jpg", + "images/task_00056_78_597_camera_top.jpg", + "images/task_00056_78_600_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00056", + "split": "eval", + "split_group_id": "task_00056__episode_000078", + "recording_id": null, + "episode_id": 78, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00056_94_115_camera_top.jpg", + "images/task_00056_94_118_camera_top.jpg", + "images/task_00056_94_121_camera_top.jpg", + "images/task_00056_94_124_camera_top.jpg", + "images/task_00056_94_127_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00056", + "split": "eval", + "split_group_id": "task_00056__episode_000094", + "recording_id": null, + "episode_id": 94, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. 
both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. both arms are active" + } + ], + "images": [ + "images/task_00056_105_572_camera_top.jpg", + "images/task_00056_105_575_camera_top.jpg", + "images/task_00056_105_578_camera_top.jpg", + "images/task_00056_105_581_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00056", + "split": "eval", + "split_group_id": "task_00056__episode_000105", + "recording_id": null, + "episode_id": 105, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00056_105_378_camera_top.jpg", + "images/task_00056_105_381_camera_top.jpg", + "images/task_00056_105_384_camera_top.jpg", + "images/task_00056_105_387_camera_top.jpg", + "images/task_00056_105_390_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00056", + "split": "eval", + "split_group_id": "task_00056__episode_000105", + "recording_id": null, + "episode_id": 105, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. 
both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. only the left arm is active" + } + ], + "images": [ + "images/task_00056_105_383_camera_top.jpg", + "images/task_00056_105_386_camera_top.jpg", + "images/task_00056_105_389_camera_top.jpg", + "images/task_00056_105_392_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00056", + "split": "eval", + "split_group_id": "task_00056__episode_000105", + "recording_id": null, + "episode_id": 105, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. both arms are active" + } + ], + "images": [ + "images/task_00056_105_211_camera_top.jpg", + "images/task_00056_105_214_camera_top.jpg", + "images/task_00056_105_217_camera_top.jpg", + "images/task_00056_105_220_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00056", + "split": "eval", + "split_group_id": "task_00056__episode_000105", + "recording_id": null, + "episode_id": 105, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
both arms are active" + } + ], + "images": [ + "images/task_00056_105_209_camera_top.jpg", + "images/task_00056_105_212_camera_top.jpg", + "images/task_00056_105_215_camera_top.jpg", + "images/task_00056_105_218_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00056", + "split": "eval", + "split_group_id": "task_00056__episode_000105", + "recording_id": null, + "episode_id": 105, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the left gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_00056_105_50_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00056", + "split": "eval", + "split_group_id": "task_00056__episode_000105", + "recording_id": null, + "episode_id": 105, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_00056_105_300_camera_top.jpg", + "images/task_00056_105_358_camera_top.jpg", + "images/task_00056_105_465_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00056", + "split": "eval", + "split_group_id": "task_00056__episode_000105", + "recording_id": null, + "episode_id": 105, + "camera": "camera_top", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nC. toward the left side of the scene\nA. 
toward the right side of the scene\nB. toward the top of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. toward the left side of the scene" + } + ], + "images": [ + "images/task_00056_105_516_camera_top.jpg", + "images/task_00056_105_521_camera_top.jpg", + "images/task_00056_105_526_camera_top.jpg", + "images/task_00056_105_531_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00056", + "split": "eval", + "split_group_id": "task_00056__episode_000105", + "recording_id": null, + "episode_id": 105, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. only the left arm is active" + } + ], + "images": [ + "images/task_00056_105_470_camera_top.jpg", + "images/task_00056_105_473_camera_top.jpg", + "images/task_00056_105_476_camera_top.jpg", + "images/task_00056_105_479_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00056", + "split": "eval", + "split_group_id": "task_00056__episode_000105", + "recording_id": null, + "episode_id": 105, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
only the right arm is active" + } + ], + "images": [ + "images/task_00056_105_627_camera_top.jpg", + "images/task_00056_105_630_camera_top.jpg", + "images/task_00056_105_633_camera_top.jpg", + "images/task_00056_105_636_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00056", + "split": "eval", + "split_group_id": "task_00056__episode_000105", + "recording_id": null, + "episode_id": 105, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00057_3_258_camera_top.jpg", + "images/task_00057_3_261_camera_top.jpg", + "images/task_00057_3_264_camera_top.jpg", + "images/task_00057_3_267_camera_top.jpg", + "images/task_00057_3_270_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00057", + "split": "eval", + "split_group_id": "task_00057__episode_000003", + "recording_id": null, + "episode_id": 3, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_00057_23_1782_camera_top.jpg", + "images/task_00057_23_1887_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00057", + "split": "eval", + "split_group_id": "task_00057__episode_000023", + "recording_id": null, + "episode_id": 23, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. transfer\nC. approach\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. transfer" + } + ], + "images": [ + "images/task_00057_23_2197_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00057", + "split": "eval", + "split_group_id": "task_00057__episode_000023", + "recording_id": null, + "episode_id": 23, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/task_00057_23_1975_camera_top.jpg", + "images/task_00057_23_1787_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00057", + "split": "eval", + "split_group_id": "task_00057__episode_000023", + "recording_id": null, + "episode_id": 23, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. only the right arm is active" + } + ], + "images": [ + "images/task_00057_23_2335_camera_top.jpg", + "images/task_00057_23_2338_camera_top.jpg", + "images/task_00057_23_2341_camera_top.jpg", + "images/task_00057_23_2344_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00057", + "split": "eval", + "split_group_id": "task_00057__episode_000023", + "recording_id": null, + "episode_id": 23, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
both arms are active" + } + ], + "images": [ + "images/task_00057_23_2542_camera_top.jpg", + "images/task_00057_23_2545_camera_top.jpg", + "images/task_00057_23_2548_camera_top.jpg", + "images/task_00057_23_2551_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00057", + "split": "eval", + "split_group_id": "task_00057__episode_000023", + "recording_id": null, + "episode_id": 23, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00057_23_2223_camera_top_t-6.jpg", + "images/task_00057_23_2226_camera_top_t-3.jpg", + "images/task_00057_23_2229_camera_top_t0.jpg", + "images/task_00057_23_2232_camera_top_t+3.jpg", + "images/task_00057_23_2235_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00057", + "split": "eval", + "split_group_id": "task_00057__episode_000023", + "recording_id": null, + "episode_id": 23, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/task_00057_23_1778_camera_top.jpg", + "images/task_00057_23_2000_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00057", + "split": "eval", + "split_group_id": "task_00057__episode_000023", + "recording_id": null, + "episode_id": 23, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. both arms are idle" + } + ], + "images": [ + "images/task_00057_23_2897_camera_top.jpg", + "images/task_00057_23_2900_camera_top.jpg", + "images/task_00057_23_2903_camera_top.jpg", + "images/task_00057_23_2906_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00057", + "split": "eval", + "split_group_id": "task_00057__episode_000023", + "recording_id": null, + "episode_id": 23, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00057_50_1968_camera_top.jpg", + "images/task_00057_50_1971_camera_top.jpg", + "images/task_00057_50_1974_camera_top.jpg", + "images/task_00057_50_1977_camera_top.jpg", + "images/task_00057_50_1980_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00057", + "split": "eval", + "split_group_id": "task_00057__episode_000050", + "recording_id": null, + "episode_id": 50, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_00057_50_1577_camera_top.jpg", + "images/task_00057_50_1502_camera_top.jpg", + "images/task_00057_50_2065_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00057", + "split": "eval", + "split_group_id": "task_00057__episode_000050", + "recording_id": null, + "episode_id": 50, + "camera": "camera_top", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
both arms are active" + } + ], + "images": [ + "images/task_00057_50_1403_camera_top.jpg", + "images/task_00057_50_1406_camera_top.jpg", + "images/task_00057_50_1409_camera_top.jpg", + "images/task_00057_50_1412_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00057", + "split": "eval", + "split_group_id": "task_00057__episode_000050", + "recording_id": null, + "episode_id": 50, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. both arms are idle" + } + ], + "images": [ + "images/task_00057_50_2546_camera_top.jpg", + "images/task_00057_50_2549_camera_top.jpg", + "images/task_00057_50_2552_camera_top.jpg", + "images/task_00057_50_2555_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00057", + "split": "eval", + "split_group_id": "task_00057__episode_000050", + "recording_id": null, + "episode_id": 50, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
only the right arm is active" + } + ], + "images": [ + "images/task_00057_50_1998_camera_top.jpg", + "images/task_00057_50_2001_camera_top.jpg", + "images/task_00057_50_2004_camera_top.jpg", + "images/task_00057_50_2007_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00057", + "split": "eval", + "split_group_id": "task_00057__episode_000050", + "recording_id": null, + "episode_id": 50, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. both arms are active" + } + ], + "images": [ + "images/task_00057_50_2649_camera_top.jpg", + "images/task_00057_50_2652_camera_top.jpg", + "images/task_00057_50_2655_camera_top.jpg", + "images/task_00057_50_2658_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00057", + "split": "eval", + "split_group_id": "task_00057__episode_000050", + "recording_id": null, + "episode_id": 50, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
only the left arm is active" + } + ], + "images": [ + "images/task_00057_50_1648_camera_top.jpg", + "images/task_00057_50_1651_camera_top.jpg", + "images/task_00057_50_1654_camera_top.jpg", + "images/task_00057_50_1657_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00057", + "split": "eval", + "split_group_id": "task_00057__episode_000050", + "recording_id": null, + "episode_id": 50, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. both arms are idle" + } + ], + "images": [ + "images/task_00057_50_2774_camera_top.jpg", + "images/task_00057_50_2777_camera_top.jpg", + "images/task_00057_50_2780_camera_top.jpg", + "images/task_00057_50_2783_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00057", + "split": "eval", + "split_group_id": "task_00057__episode_000050", + "recording_id": null, + "episode_id": 50, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
only the right arm is active" + } + ], + "images": [ + "images/task_00057_50_1926_camera_top.jpg", + "images/task_00057_50_1929_camera_top.jpg", + "images/task_00057_50_1932_camera_top.jpg", + "images/task_00057_50_1935_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00057", + "split": "eval", + "split_group_id": "task_00057__episode_000050", + "recording_id": null, + "episode_id": 50, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. approach\nC. transfer\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. approach" + } + ], + "images": [ + "images/task_00057_100_1867_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00057", + "split": "eval", + "split_group_id": "task_00057__episode_000100", + "recording_id": null, + "episode_id": 100, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00057_164_2333_camera_top.jpg", + "images/task_00057_164_2336_camera_top.jpg", + "images/task_00057_164_2339_camera_top.jpg", + "images/task_00057_164_2342_camera_top.jpg", + "images/task_00057_164_2345_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00057", + "split": "eval", + "split_group_id": "task_00057__episode_000164", + "recording_id": null, + "episode_id": 164, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00058_0_426_camera_top.jpg", + "images/task_00058_0_429_camera_top.jpg", + "images/task_00058_0_432_camera_top.jpg", + "images/task_00058_0_435_camera_top.jpg", + "images/task_00058_0_438_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00058", + "split": "eval", + "split_group_id": "task_00058__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00058_0_899_camera_top_t-6.jpg", + "images/task_00058_0_902_camera_top_t-3.jpg", + "images/task_00058_0_905_camera_top_t0.jpg", + "images/task_00058_0_908_camera_top_t+3.jpg", + "images/task_00058_0_911_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00058", + "split": "eval", + "split_group_id": "task_00058__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the left side of the scene\nA. toward the right side of the scene\nB. toward the top of the scene\nC. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. toward the left side of the scene" + } + ], + "images": [ + "images/task_00058_0_896_camera_top.jpg", + "images/task_00058_0_901_camera_top.jpg", + "images/task_00058_0_906_camera_top.jpg", + "images/task_00058_0_911_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00058", + "split": "eval", + "split_group_id": "task_00058__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_00058_0_488_camera_top.jpg", + "images/task_00058_0_521_camera_top.jpg", + "images/task_00058_0_338_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00058", + "split": "eval", + "split_group_id": "task_00058__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. approach\nC. pre-approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. hold and carry" + } + ], + "images": [ + "images/task_00058_0_885_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00058", + "split": "eval", + "split_group_id": "task_00058__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/task_00058_0_873_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00058", + "split": "eval", + "split_group_id": "task_00058__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_00058_0_695_camera_top.jpg", + "images/task_00058_0_339_camera_top.jpg", + "images/task_00058_0_994_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00058", + "split": "eval", + "split_group_id": "task_00058__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. hold and carry\nC. contact\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. contact" + } + ], + "images": [ + "images/task_00058_0_568_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00058", + "split": "eval", + "split_group_id": "task_00058__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00058_0_395_camera_top.jpg", + "images/task_00058_0_398_camera_top.jpg", + "images/task_00058_0_401_camera_top.jpg", + "images/task_00058_0_404_camera_top.jpg", + "images/task_00058_0_407_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00058", + "split": "eval", + "split_group_id": "task_00058__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00058_0_783_camera_top.jpg", + "images/task_00058_0_786_camera_top.jpg", + "images/task_00058_0_789_camera_top.jpg", + "images/task_00058_0_792_camera_top.jpg", + "images/task_00058_0_795_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00058", + "split": "eval", + "split_group_id": "task_00058__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. pre-approach\nC. transfer\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
approach" + } + ], + "images": [ + "images/task_00058_15_1460_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00058", + "split": "eval", + "split_group_id": "task_00058__episode_000015", + "recording_id": null, + "episode_id": 15, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_00058_27_970_camera_top.jpg", + "images/task_00058_27_1158_camera_top.jpg", + "images/task_00058_27_159_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00058", + "split": "eval", + "split_group_id": "task_00058__episode_000027", + "recording_id": null, + "episode_id": 27, + "camera": "camera_top", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00058_27_181_camera_top.jpg", + "images/task_00058_27_184_camera_top.jpg", + "images/task_00058_27_187_camera_top.jpg", + "images/task_00058_27_190_camera_top.jpg", + "images/task_00058_27_193_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00058", + "split": "eval", + "split_group_id": "task_00058__episode_000027", + "recording_id": null, + "episode_id": 27, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_00058_27_302_camera_top.jpg", + "images/task_00058_27_842_camera_top.jpg", + "images/task_00058_27_158_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00058", + "split": "eval", + "split_group_id": "task_00058__episode_000027", + "recording_id": null, + "episode_id": 27, + "camera": "camera_top", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. release\nC. transfer\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
contact" + } + ], + "images": [ + "images/task_00058_27_840_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00058", + "split": "eval", + "split_group_id": "task_00058__episode_000027", + "recording_id": null, + "episode_id": 27, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00058_27_852_camera_top.jpg", + "images/task_00058_27_855_camera_top.jpg", + "images/task_00058_27_858_camera_top.jpg", + "images/task_00058_27_861_camera_top.jpg", + "images/task_00058_27_864_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00058", + "split": "eval", + "split_group_id": "task_00058__episode_000027", + "recording_id": null, + "episode_id": 27, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00058_27_223_camera_top.jpg", + "images/task_00058_27_226_camera_top.jpg", + "images/task_00058_27_229_camera_top.jpg", + "images/task_00058_27_232_camera_top.jpg", + "images/task_00058_27_235_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00058", + "split": "eval", + "split_group_id": "task_00058__episode_000027", + "recording_id": null, + "episode_id": 27, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00058_27_1499_camera_top_t-6.jpg", + "images/task_00058_27_1502_camera_top_t-3.jpg", + "images/task_00058_27_1505_camera_top_t0.jpg", + "images/task_00058_27_1508_camera_top_t+3.jpg", + "images/task_00058_27_1511_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00058", + "split": "eval", + "split_group_id": "task_00058__episode_000027", + "recording_id": null, + "episode_id": 27, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. contact\nC. release\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
pre-approach" + } + ], + "images": [ + "images/task_00058_30_339_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00058", + "split": "eval", + "split_group_id": "task_00058__episode_000030", + "recording_id": null, + "episode_id": 30, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_00058_48_457_camera_top.jpg", + "images/task_00058_48_92_camera_top.jpg", + "images/task_00058_48_130_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00058", + "split": "eval", + "split_group_id": "task_00058__episode_000048", + "recording_id": null, + "episode_id": 48, + "camera": "camera_top", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00058_66_97_camera_top.jpg", + "images/task_00058_66_100_camera_top.jpg", + "images/task_00058_66_103_camera_top.jpg", + "images/task_00058_66_106_camera_top.jpg", + "images/task_00058_66_109_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00058", + "split": "eval", + "split_group_id": "task_00058__episode_000066", + "recording_id": null, + "episode_id": 66, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_00058_72_336_camera_top.jpg", + "images/task_00058_72_95_camera_top.jpg", + "images/task_00058_72_221_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00058", + "split": "eval", + "split_group_id": "task_00058__episode_000072", + "recording_id": null, + "episode_id": 72, + "camera": "camera_top", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00058_85_92_camera_top.jpg", + "images/task_00058_85_95_camera_top.jpg", + "images/task_00058_85_98_camera_top.jpg", + "images/task_00058_85_101_camera_top.jpg", + "images/task_00058_85_104_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00058", + "split": "eval", + "split_group_id": "task_00058__episode_000085", + "recording_id": null, + "episode_id": 85, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the left side of the scene\nB. toward the top of the scene\nC. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. toward the right side of the scene" + } + ], + "images": [ + "images/task_00058_91_284_camera_top.jpg", + "images/task_00058_91_289_camera_top.jpg", + "images/task_00058_91_294_camera_top.jpg", + "images/task_00058_91_299_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00058", + "split": "eval", + "split_group_id": "task_00058__episode_000091", + "recording_id": null, + "episode_id": 91, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_00058_100_146_camera_top.jpg", + "images/task_00058_100_257_camera_top.jpg", + "images/task_00058_100_296_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00058", + "split": "eval", + "split_group_id": "task_00058__episode_000100", + "recording_id": null, + "episode_id": 100, + "camera": "camera_top", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nA. toward the right side of the scene\nB. toward the bottom of the scene\nC. toward the left side of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. toward the right side of the scene" + } + ], + "images": [ + "images/task_00058_103_306_camera_top.jpg", + "images/task_00058_103_311_camera_top.jpg", + "images/task_00058_103_316_camera_top.jpg", + "images/task_00058_103_321_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00058", + "split": "eval", + "split_group_id": "task_00058__episode_000103", + "recording_id": null, + "episode_id": 103, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/task_00058_142_410_camera_top.jpg", + "images/task_00058_142_186_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00058", + "split": "eval", + "split_group_id": "task_00058__episode_000142", + "recording_id": null, + "episode_id": 142, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_00058_142_585_camera_top.jpg", + "images/task_00058_142_462_camera_top.jpg", + "images/task_00058_142_181_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00058", + "split": "eval", + "split_group_id": "task_00058__episode_000142", + "recording_id": null, + "episode_id": 142, + "camera": "camera_top", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00058_142_378_camera_top.jpg", + "images/task_00058_142_381_camera_top.jpg", + "images/task_00058_142_384_camera_top.jpg", + "images/task_00058_142_387_camera_top.jpg", + "images/task_00058_142_390_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00058", + "split": "eval", + "split_group_id": "task_00058__episode_000142", + "recording_id": null, + "episode_id": 142, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_00058_142_112_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00058", + "split": "eval", + "split_group_id": "task_00058__episode_000142", + "recording_id": null, + "episode_id": 142, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the left side of the scene\nA. toward the bottom of the scene\nC. toward the right side of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
toward the left side of the scene" + } + ], + "images": [ + "images/task_00058_142_159_camera_top.jpg", + "images/task_00058_142_164_camera_top.jpg", + "images/task_00058_142_169_camera_top.jpg", + "images/task_00058_142_174_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00058", + "split": "eval", + "split_group_id": "task_00058__episode_000142", + "recording_id": null, + "episode_id": 142, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. release\nC. pre-approach\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. release" + } + ], + "images": [ + "images/task_00058_142_579_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00058", + "split": "eval", + "split_group_id": "task_00058__episode_000142", + "recording_id": null, + "episode_id": 142, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/task_00058_142_708_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00058", + "split": "eval", + "split_group_id": "task_00058__episode_000142", + "recording_id": null, + "episode_id": 142, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the left side of the scene\nA. 
toward the top of the scene\nC. toward the right side of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. toward the left side of the scene" + } + ], + "images": [ + "images/task_00058_142_517_camera_top.jpg", + "images/task_00058_142_522_camera_top.jpg", + "images/task_00058_142_527_camera_top.jpg", + "images/task_00058_142_532_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00058", + "split": "eval", + "split_group_id": "task_00058__episode_000142", + "recording_id": null, + "episode_id": 142, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_00058_142_572_camera_top.jpg", + "images/task_00058_142_283_camera_top.jpg", + "images/task_00058_142_523_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00058", + "split": "eval", + "split_group_id": "task_00058__episode_000142", + "recording_id": null, + "episode_id": 142, + "camera": "camera_top", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the top of the scene\nB. toward the left side of the scene\nC. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00058_142_621_camera_top.jpg", + "images/task_00058_142_626_camera_top.jpg", + "images/task_00058_142_631_camera_top.jpg", + "images/task_00058_142_636_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00058", + "split": "eval", + "split_group_id": "task_00058__episode_000142", + "recording_id": null, + "episode_id": 142, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nB. toward the left side of the scene\nA. toward the top of the scene\nC. toward the bottom of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. toward the left side of the scene" + } + ], + "images": [ + "images/task_00060_7_263_camera_top.jpg", + "images/task_00060_7_268_camera_top.jpg", + "images/task_00060_7_273_camera_top.jpg", + "images/task_00060_7_278_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00060", + "split": "eval", + "split_group_id": "task_00060__episode_000007", + "recording_id": null, + "episode_id": 7, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. release\nC. transfer\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
release" + } + ], + "images": [ + "images/task_00060_10_262_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00060", + "split": "eval", + "split_group_id": "task_00060__episode_000010", + "recording_id": null, + "episode_id": 10, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_00060_26_266_camera_top.jpg", + "images/task_00060_26_355_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00060", + "split": "eval", + "split_group_id": "task_00060__episode_000026", + "recording_id": null, + "episode_id": 26, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nB. toward the top of the scene\nA. toward the right side of the scene\nC. toward the bottom of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
toward the top of the scene" + } + ], + "images": [ + "images/task_00060_26_431_camera_top.jpg", + "images/task_00060_26_436_camera_top.jpg", + "images/task_00060_26_441_camera_top.jpg", + "images/task_00060_26_446_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00060", + "split": "eval", + "split_group_id": "task_00060__episode_000026", + "recording_id": null, + "episode_id": 26, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nA. toward the top of the scene\nB. toward the bottom of the scene\nC. toward the right side of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. toward the top of the scene" + } + ], + "images": [ + "images/task_00060_26_239_camera_top.jpg", + "images/task_00060_26_244_camera_top.jpg", + "images/task_00060_26_249_camera_top.jpg", + "images/task_00060_26_254_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00060", + "split": "eval", + "split_group_id": "task_00060__episode_000026", + "recording_id": null, + "episode_id": 26, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00060_26_148_camera_top_t-6.jpg", + "images/task_00060_26_151_camera_top_t-3.jpg", + "images/task_00060_26_154_camera_top_t0.jpg", + "images/task_00060_26_157_camera_top_t+3.jpg", + "images/task_00060_26_160_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00060", + "split": "eval", + "split_group_id": "task_00060__episode_000026", + "recording_id": null, + "episode_id": 26, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. release\nC. approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. transfer" + } + ], + "images": [ + "images/task_00060_26_432_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00060", + "split": "eval", + "split_group_id": "task_00060__episode_000026", + "recording_id": null, + "episode_id": 26, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nB. toward the bottom of the scene\nA. toward the top of the scene\nC. toward the left side of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
toward the bottom of the scene" + } + ], + "images": [ + "images/task_00060_26_214_camera_top.jpg", + "images/task_00060_26_219_camera_top.jpg", + "images/task_00060_26_224_camera_top.jpg", + "images/task_00060_26_229_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00060", + "split": "eval", + "split_group_id": "task_00060__episode_000026", + "recording_id": null, + "episode_id": 26, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_00060_26_388_camera_top.jpg", + "images/task_00060_26_295_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00060", + "split": "eval", + "split_group_id": "task_00060__episode_000026", + "recording_id": null, + "episode_id": 26, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. pre-approach\nC. transfer\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
pre-approach" + } + ], + "images": [ + "images/task_00060_34_479_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00060", + "split": "eval", + "split_group_id": "task_00060__episode_000034", + "recording_id": null, + "episode_id": 34, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nA. toward the right side of the scene\nB. toward the top of the scene\nC. toward the bottom of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. toward the right side of the scene" + } + ], + "images": [ + "images/task_00060_42_238_camera_top.jpg", + "images/task_00060_42_243_camera_top.jpg", + "images/task_00060_42_248_camera_top.jpg", + "images/task_00060_42_253_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00060", + "split": "eval", + "split_group_id": "task_00060__episode_000042", + "recording_id": null, + "episode_id": 42, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. contact\nC. transfer\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
pre-approach" + } + ], + "images": [ + "images/task_00060_53_199_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00060", + "split": "eval", + "split_group_id": "task_00060__episode_000053", + "recording_id": null, + "episode_id": 53, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. approach\nC. release\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. transfer" + } + ], + "images": [ + "images/task_00060_55_534_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00060", + "split": "eval", + "split_group_id": "task_00060__episode_000055", + "recording_id": null, + "episode_id": 55, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nD. toward the top of the scene\nA. toward the right side of the scene\nB. toward the bottom of the scene\nC. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
toward the top of the scene" + } + ], + "images": [ + "images/task_00060_63_419_camera_top.jpg", + "images/task_00060_63_424_camera_top.jpg", + "images/task_00060_63_429_camera_top.jpg", + "images/task_00060_63_434_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00060", + "split": "eval", + "split_group_id": "task_00060__episode_000063", + "recording_id": null, + "episode_id": 63, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_00060_63_321_camera_top.jpg", + "images/task_00060_63_492_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00060", + "split": "eval", + "split_group_id": "task_00060__episode_000063", + "recording_id": null, + "episode_id": 63, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nB. toward the left side of the scene\nA. toward the right side of the scene\nC. toward the bottom of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
toward the left side of the scene" + } + ], + "images": [ + "images/task_00060_63_467_camera_top.jpg", + "images/task_00060_63_472_camera_top.jpg", + "images/task_00060_63_477_camera_top.jpg", + "images/task_00060_63_482_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00060", + "split": "eval", + "split_group_id": "task_00060__episode_000063", + "recording_id": null, + "episode_id": 63, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nA. toward the right side of the scene\nB. toward the top of the scene\nC. toward the bottom of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. toward the right side of the scene" + } + ], + "images": [ + "images/task_00060_63_343_camera_top.jpg", + "images/task_00060_63_348_camera_top.jpg", + "images/task_00060_63_353_camera_top.jpg", + "images/task_00060_63_358_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00060", + "split": "eval", + "split_group_id": "task_00060__episode_000063", + "recording_id": null, + "episode_id": 63, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00060_63_229_camera_top_t-6.jpg", + "images/task_00060_63_232_camera_top_t-3.jpg", + "images/task_00060_63_235_camera_top_t0.jpg", + "images/task_00060_63_238_camera_top_t+3.jpg", + "images/task_00060_63_241_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00060", + "split": "eval", + "split_group_id": "task_00060__episode_000063", + "recording_id": null, + "episode_id": 63, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. approach\nC. release\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. pre-approach" + } + ], + "images": [ + "images/task_00060_63_46_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00060", + "split": "eval", + "split_group_id": "task_00060__episode_000063", + "recording_id": null, + "episode_id": 63, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/task_00060_63_334_camera_top.jpg", + "images/task_00060_63_491_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00060", + "split": "eval", + "split_group_id": "task_00060__episode_000063", + "recording_id": null, + "episode_id": 63, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. approach\nC. release\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. release" + } + ], + "images": [ + "images/task_00060_65_454_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00060", + "split": "eval", + "split_group_id": "task_00060__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. transfer\nC. pre-approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. pre-approach" + } + ], + "images": [ + "images/task_00060_92_227_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00060", + "split": "eval", + "split_group_id": "task_00060__episode_000092", + "recording_id": null, + "episode_id": 92, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. pre-approach\nC. release\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
approach" + } + ], + "images": [ + "images/task_00060_100_181_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00060", + "split": "eval", + "split_group_id": "task_00060__episode_000100", + "recording_id": null, + "episode_id": 100, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. transfer\nC. pre-approach\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. approach" + } + ], + "images": [ + "images/task_00060_106_159_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00060", + "split": "eval", + "split_group_id": "task_00060__episode_000106", + "recording_id": null, + "episode_id": 106, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nB. toward the top of the scene\nA. toward the right side of the scene\nC. toward the bottom of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
toward the top of the scene" + } + ], + "images": [ + "images/task_00060_111_127_camera_top.jpg", + "images/task_00060_111_132_camera_top.jpg", + "images/task_00060_111_137_camera_top.jpg", + "images/task_00060_111_142_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00060", + "split": "eval", + "split_group_id": "task_00060__episode_000111", + "recording_id": null, + "episode_id": 111, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. contact\nC. pre-approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. pre-approach" + } + ], + "images": [ + "images/task_00060_114_80_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00060", + "split": "eval", + "split_group_id": "task_00060__episode_000114", + "recording_id": null, + "episode_id": 114, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nB. toward the bottom of the scene\nA. toward the left side of the scene\nC. toward the top of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
toward the bottom of the scene" + } + ], + "images": [ + "images/task_00061_32_94_camera_top.jpg", + "images/task_00061_32_99_camera_top.jpg", + "images/task_00061_32_104_camera_top.jpg", + "images/task_00061_32_109_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00061", + "split": "eval", + "split_group_id": "task_00061__episode_000032", + "recording_id": null, + "episode_id": 32, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nC. toward the left side of the scene\nA. toward the top of the scene\nB. toward the bottom of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. toward the left side of the scene" + } + ], + "images": [ + "images/task_00061_48_320_camera_top.jpg", + "images/task_00061_48_325_camera_top.jpg", + "images/task_00061_48_330_camera_top.jpg", + "images/task_00061_48_335_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00061", + "split": "eval", + "split_group_id": "task_00061__episode_000048", + "recording_id": null, + "episode_id": 48, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00061_85_93_camera_top_t-6.jpg", + "images/task_00061_85_96_camera_top_t-3.jpg", + "images/task_00061_85_99_camera_top_t0.jpg", + "images/task_00061_85_102_camera_top_t+3.jpg", + "images/task_00061_85_105_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00061", + "split": "eval", + "split_group_id": "task_00061__episode_000085", + "recording_id": null, + "episode_id": 85, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_00061_85_812_camera_top.jpg", + "images/task_00061_85_912_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00061", + "split": "eval", + "split_group_id": "task_00061__episode_000085", + "recording_id": null, + "episode_id": 85, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/task_00061_85_874_camera_top.jpg", + "images/task_00061_85_757_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00061", + "split": "eval", + "split_group_id": "task_00061__episode_000085", + "recording_id": null, + "episode_id": 85, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. both arms are active" + } + ], + "images": [ + "images/task_00061_85_665_camera_top.jpg", + "images/task_00061_85_668_camera_top.jpg", + "images/task_00061_85_671_camera_top.jpg", + "images/task_00061_85_674_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00061", + "split": "eval", + "split_group_id": "task_00061__episode_000085", + "recording_id": null, + "episode_id": 85, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
only the right arm is active" + } + ], + "images": [ + "images/task_00061_85_94_camera_top.jpg", + "images/task_00061_85_97_camera_top.jpg", + "images/task_00061_85_100_camera_top.jpg", + "images/task_00061_85_103_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00061", + "split": "eval", + "split_group_id": "task_00061__episode_000085", + "recording_id": null, + "episode_id": 85, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. only the left arm is active" + } + ], + "images": [ + "images/task_00061_85_480_camera_top.jpg", + "images/task_00061_85_483_camera_top.jpg", + "images/task_00061_85_486_camera_top.jpg", + "images/task_00061_85_489_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00061", + "split": "eval", + "split_group_id": "task_00061__episode_000085", + "recording_id": null, + "episode_id": 85, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nA. toward the top of the scene\nB. toward the left side of the scene\nC. toward the right side of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
toward the top of the scene" + } + ], + "images": [ + "images/task_00061_96_396_camera_top.jpg", + "images/task_00061_96_401_camera_top.jpg", + "images/task_00061_96_406_camera_top.jpg", + "images/task_00061_96_411_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00061", + "split": "eval", + "split_group_id": "task_00061__episode_000096", + "recording_id": null, + "episode_id": 96, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. approach\nC. pre-approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. transfer" + } + ], + "images": [ + "images/task_00061_101_133_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00061", + "split": "eval", + "split_group_id": "task_00061__episode_000101", + "recording_id": null, + "episode_id": 101, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nA. toward the top of the scene\nB. toward the bottom of the scene\nC. toward the left side of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
toward the top of the scene" + } + ], + "images": [ + "images/task_00061_109_328_camera_top.jpg", + "images/task_00061_109_333_camera_top.jpg", + "images/task_00061_109_338_camera_top.jpg", + "images/task_00061_109_343_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00061", + "split": "eval", + "split_group_id": "task_00061__episode_000109", + "recording_id": null, + "episode_id": 109, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_00062_5_662_camera_top.jpg", + "images/task_00062_5_806_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00062", + "split": "eval", + "split_group_id": "task_00062__episode_000005", + "recording_id": null, + "episode_id": 5, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00062_5_1775_camera_top_t-6.jpg", + "images/task_00062_5_1778_camera_top_t-3.jpg", + "images/task_00062_5_1781_camera_top_t0.jpg", + "images/task_00062_5_1784_camera_top_t+3.jpg", + "images/task_00062_5_1787_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00062", + "split": "eval", + "split_group_id": "task_00062__episode_000005", + "recording_id": null, + "episode_id": 5, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00062_5_1631_camera_top_t-6.jpg", + "images/task_00062_5_1634_camera_top_t-3.jpg", + "images/task_00062_5_1637_camera_top_t0.jpg", + "images/task_00062_5_1640_camera_top_t+3.jpg", + "images/task_00062_5_1643_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00062", + "split": "eval", + "split_group_id": "task_00062__episode_000005", + "recording_id": null, + "episode_id": 5, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00062_5_1274_camera_top_t-6.jpg", + "images/task_00062_5_1277_camera_top_t-3.jpg", + "images/task_00062_5_1280_camera_top_t0.jpg", + "images/task_00062_5_1283_camera_top_t+3.jpg", + "images/task_00062_5_1286_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00062", + "split": "eval", + "split_group_id": "task_00062__episode_000005", + "recording_id": null, + "episode_id": 5, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00062_5_1472_camera_top_t-6.jpg", + "images/task_00062_5_1475_camera_top_t-3.jpg", + "images/task_00062_5_1478_camera_top_t0.jpg", + "images/task_00062_5_1481_camera_top_t+3.jpg", + "images/task_00062_5_1484_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00062", + "split": "eval", + "split_group_id": "task_00062__episode_000005", + "recording_id": null, + "episode_id": 5, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00062_15_634_camera_top.jpg", + "images/task_00062_15_637_camera_top.jpg", + "images/task_00062_15_640_camera_top.jpg", + "images/task_00062_15_643_camera_top.jpg", + "images/task_00062_15_646_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00062", + "split": "eval", + "split_group_id": "task_00062__episode_000015", + "recording_id": null, + "episode_id": 15, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00062_15_820_camera_top.jpg", + "images/task_00062_15_823_camera_top.jpg", + "images/task_00062_15_826_camera_top.jpg", + "images/task_00062_15_829_camera_top.jpg", + "images/task_00062_15_832_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00062", + "split": "eval", + "split_group_id": "task_00062__episode_000015", + "recording_id": null, + "episode_id": 15, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00062_15_1271_camera_top.jpg", + "images/task_00062_15_1274_camera_top.jpg", + "images/task_00062_15_1277_camera_top.jpg", + "images/task_00062_15_1280_camera_top.jpg", + "images/task_00062_15_1283_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00062", + "split": "eval", + "split_group_id": "task_00062__episode_000015", + "recording_id": null, + "episode_id": 15, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_00062_15_1077_camera_top.jpg", + "images/task_00062_15_1542_camera_top.jpg", + "images/task_00062_15_104_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00062", + "split": "eval", + "split_group_id": "task_00062__episode_000015", + "recording_id": null, + "episode_id": 15, + "camera": "camera_top", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_00062_15_542_camera_top.jpg", + "images/task_00062_15_1588_camera_top.jpg", + "images/task_00062_15_1127_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00062", + "split": "eval", + "split_group_id": "task_00062__episode_000015", + "recording_id": null, + "episode_id": 15, + "camera": "camera_top", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_00062_15_1609_camera_top.jpg", + "images/task_00062_15_713_camera_top.jpg", + "images/task_00062_15_1163_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00062", + "split": "eval", + "split_group_id": "task_00062__episode_000015", + "recording_id": null, + "episode_id": 15, + "camera": "camera_top", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nA. toward the top of the scene\nB. toward the bottom of the scene\nC. toward the right side of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
toward the top of the scene" + } + ], + "images": [ + "images/task_00062_15_1473_camera_top.jpg", + "images/task_00062_15_1478_camera_top.jpg", + "images/task_00062_15_1483_camera_top.jpg", + "images/task_00062_15_1488_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00062", + "split": "eval", + "split_group_id": "task_00062__episode_000015", + "recording_id": null, + "episode_id": 15, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_00062_15_1164_camera_top.jpg", + "images/task_00062_15_1093_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00062", + "split": "eval", + "split_group_id": "task_00062__episode_000015", + "recording_id": null, + "episode_id": 15, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/task_00062_15_1162_camera_top.jpg", + "images/task_00062_15_1398_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00062", + "split": "eval", + "split_group_id": "task_00062__episode_000015", + "recording_id": null, + "episode_id": 15, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. release\nC. approach\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. approach" + } + ], + "images": [ + "images/task_00062_31_776_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00062", + "split": "eval", + "split_group_id": "task_00062__episode_000031", + "recording_id": null, + "episode_id": 31, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nA. toward the right side of the scene\nB. toward the bottom of the scene\nC. toward the top of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00062_39_2099_camera_top.jpg", + "images/task_00062_39_2104_camera_top.jpg", + "images/task_00062_39_2109_camera_top.jpg", + "images/task_00062_39_2114_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00062", + "split": "eval", + "split_group_id": "task_00062__episode_000039", + "recording_id": null, + "episode_id": 39, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. transfer\nC. approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. release" + } + ], + "images": [ + "images/task_00062_41_2689_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00062", + "split": "eval", + "split_group_id": "task_00062__episode_000041", + "recording_id": null, + "episode_id": 41, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the bottom of the scene\nB. toward the top of the scene\nC. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00062_44_2378_camera_top.jpg", + "images/task_00062_44_2383_camera_top.jpg", + "images/task_00062_44_2388_camera_top.jpg", + "images/task_00062_44_2393_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00062", + "split": "eval", + "split_group_id": "task_00062__episode_000044", + "recording_id": null, + "episode_id": 44, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. approach\nC. release\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. transfer" + } + ], + "images": [ + "images/task_00062_49_2120_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00062", + "split": "eval", + "split_group_id": "task_00062__episode_000049", + "recording_id": null, + "episode_id": 49, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nA. toward the right side of the scene\nB. toward the top of the scene\nC. toward the bottom of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00062_52_711_camera_top.jpg", + "images/task_00062_52_716_camera_top.jpg", + "images/task_00062_52_721_camera_top.jpg", + "images/task_00062_52_726_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00062", + "split": "eval", + "split_group_id": "task_00062__episode_000052", + "recording_id": null, + "episode_id": 52, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. approach\nC. transfer\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. pre-approach" + } + ], + "images": [ + "images/task_00062_54_23_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00062", + "split": "eval", + "split_group_id": "task_00062__episode_000054", + "recording_id": null, + "episode_id": 54, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nB. toward the right side of the scene\nA. toward the bottom of the scene\nC. toward the left side of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00062_57_1401_camera_top.jpg", + "images/task_00062_57_1406_camera_top.jpg", + "images/task_00062_57_1411_camera_top.jpg", + "images/task_00062_57_1416_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00062", + "split": "eval", + "split_group_id": "task_00062__episode_000057", + "recording_id": null, + "episode_id": 57, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. pre-approach\nC. transfer\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. transfer" + } + ], + "images": [ + "images/task_00062_70_1771_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00062", + "split": "eval", + "split_group_id": "task_00062__episode_000070", + "recording_id": null, + "episode_id": 70, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_00062_80_1131_camera_top.jpg", + "images/task_00062_80_1078_camera_top.jpg", + "images/task_00062_80_1958_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00062", + "split": "eval", + "split_group_id": "task_00062__episode_000080", + "recording_id": null, + "episode_id": 80, + "camera": "camera_top", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. 
The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_00062_80_1068_camera_top.jpg", + "images/task_00062_80_1130_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00062", + "split": "eval", + "split_group_id": "task_00062__episode_000080", + "recording_id": null, + "episode_id": 80, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00062_80_1109_camera_top.jpg", + "images/task_00062_80_1112_camera_top.jpg", + "images/task_00062_80_1115_camera_top.jpg", + "images/task_00062_80_1118_camera_top.jpg", + "images/task_00062_80_1121_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00062", + "split": "eval", + "split_group_id": "task_00062__episode_000080", + "recording_id": null, + "episode_id": 80, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00062_80_1099_camera_top.jpg", + "images/task_00062_80_1102_camera_top.jpg", + "images/task_00062_80_1105_camera_top.jpg", + "images/task_00062_80_1108_camera_top.jpg", + "images/task_00062_80_1111_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00062", + "split": "eval", + "split_group_id": "task_00062__episode_000080", + "recording_id": null, + "episode_id": 80, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the left gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/task_00062_80_2025_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00062", + "split": "eval", + "split_group_id": "task_00062__episode_000080", + "recording_id": null, + "episode_id": 80, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the left gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — contact established" + } + ], + "images": [ + "images/task_00062_80_1105_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00062", + "split": "eval", + "split_group_id": "task_00062__episode_000080", + "recording_id": null, + "episode_id": 80, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_00062_80_1072_camera_top.jpg", + "images/task_00062_80_2062_camera_top.jpg", + "images/task_00062_80_1129_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00062", + "split": "eval", + "split_group_id": "task_00062__episode_000080", + "recording_id": null, + "episode_id": 80, + "camera": "camera_top", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00062_80_2218_camera_top_t-6.jpg", + "images/task_00062_80_2221_camera_top_t-3.jpg", + "images/task_00062_80_2224_camera_top_t0.jpg", + "images/task_00062_80_2227_camera_top_t+3.jpg", + "images/task_00062_80_2230_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00062", + "split": "eval", + "split_group_id": "task_00062__episode_000080", + "recording_id": null, + "episode_id": 80, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the top of the scene\nB. toward the left side of the scene\nC. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. toward the right side of the scene" + } + ], + "images": [ + "images/task_00062_86_1216_camera_top.jpg", + "images/task_00062_86_1221_camera_top.jpg", + "images/task_00062_86_1226_camera_top.jpg", + "images/task_00062_86_1231_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00062", + "split": "eval", + "split_group_id": "task_00062__episode_000086", + "recording_id": null, + "episode_id": 86, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00062_91_1348_camera_top.jpg", + "images/task_00062_91_1351_camera_top.jpg", + "images/task_00062_91_1354_camera_top.jpg", + "images/task_00062_91_1357_camera_top.jpg", + "images/task_00062_91_1360_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00062", + "split": "eval", + "split_group_id": "task_00062__episode_000091", + "recording_id": null, + "episode_id": 91, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00062_91_238_camera_top.jpg", + "images/task_00062_91_241_camera_top.jpg", + "images/task_00062_91_244_camera_top.jpg", + "images/task_00062_91_247_camera_top.jpg", + "images/task_00062_91_250_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00062", + "split": "eval", + "split_group_id": "task_00062__episode_000091", + "recording_id": null, + "episode_id": 91, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00062_91_92_camera_top.jpg", + "images/task_00062_91_95_camera_top.jpg", + "images/task_00062_91_98_camera_top.jpg", + "images/task_00062_91_101_camera_top.jpg", + "images/task_00062_91_104_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00062", + "split": "eval", + "split_group_id": "task_00062__episode_000091", + "recording_id": null, + "episode_id": 91, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00062_91_551_camera_top.jpg", + "images/task_00062_91_554_camera_top.jpg", + "images/task_00062_91_557_camera_top.jpg", + "images/task_00062_91_560_camera_top.jpg", + "images/task_00062_91_563_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00062", + "split": "eval", + "split_group_id": "task_00062__episode_000091", + "recording_id": null, + "episode_id": 91, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00062_91_1048_camera_top.jpg", + "images/task_00062_91_1051_camera_top.jpg", + "images/task_00062_91_1054_camera_top.jpg", + "images/task_00062_91_1057_camera_top.jpg", + "images/task_00062_91_1060_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00062", + "split": "eval", + "split_group_id": "task_00062__episode_000091", + "recording_id": null, + "episode_id": 91, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00062_91_276_camera_top.jpg", + "images/task_00062_91_279_camera_top.jpg", + "images/task_00062_91_282_camera_top.jpg", + "images/task_00062_91_285_camera_top.jpg", + "images/task_00062_91_288_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00062", + "split": "eval", + "split_group_id": "task_00062__episode_000091", + "recording_id": null, + "episode_id": 91, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. 
Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00062_91_1001_camera_top.jpg", + "images/task_00062_91_1004_camera_top.jpg", + "images/task_00062_91_1007_camera_top.jpg", + "images/task_00062_91_1010_camera_top.jpg", + "images/task_00062_91_1013_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00062", + "split": "eval", + "split_group_id": "task_00062__episode_000091", + "recording_id": null, + "episode_id": 91, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the left gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/task_00062_91_1852_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00062", + "split": "eval", + "split_group_id": "task_00062__episode_000091", + "recording_id": null, + "episode_id": 91, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00062_91_1704_camera_top.jpg", + "images/task_00062_91_1707_camera_top.jpg", + "images/task_00062_91_1710_camera_top.jpg", + "images/task_00062_91_1713_camera_top.jpg", + "images/task_00062_91_1716_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00062", + "split": "eval", + "split_group_id": "task_00062__episode_000091", + "recording_id": null, + "episode_id": 91, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the left gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_00062_91_1897_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00062", + "split": "eval", + "split_group_id": "task_00062__episode_000091", + "recording_id": null, + "episode_id": 91, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/task_00062_91_749_camera_top.jpg", + "images/task_00062_91_571_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00062", + "split": "eval", + "split_group_id": "task_00062__episode_000091", + "recording_id": null, + "episode_id": 91, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00062_91_942_camera_top.jpg", + "images/task_00062_91_945_camera_top.jpg", + "images/task_00062_91_948_camera_top.jpg", + "images/task_00062_91_951_camera_top.jpg", + "images/task_00062_91_954_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00062", + "split": "eval", + "split_group_id": "task_00062__episode_000091", + "recording_id": null, + "episode_id": 91, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00062_91_174_camera_top.jpg", + "images/task_00062_91_177_camera_top.jpg", + "images/task_00062_91_180_camera_top.jpg", + "images/task_00062_91_183_camera_top.jpg", + "images/task_00062_91_186_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00062", + "split": "eval", + "split_group_id": "task_00062__episode_000091", + "recording_id": null, + "episode_id": 91, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. pre-approach\nC. release\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. approach" + } + ], + "images": [ + "images/task_00062_94_142_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00062", + "split": "eval", + "split_group_id": "task_00062__episode_000094", + "recording_id": null, + "episode_id": 94, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. transfer\nC. approach\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
release" + } + ], + "images": [ + "images/task_00062_96_1959_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00062", + "split": "eval", + "split_group_id": "task_00062__episode_000096", + "recording_id": null, + "episode_id": 96, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. transfer\nC. pre-approach\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. approach" + } + ], + "images": [ + "images/task_00062_96_158_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00062", + "split": "eval", + "split_group_id": "task_00062__episode_000096", + "recording_id": null, + "episode_id": 96, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. approach\nC. release\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. release" + } + ], + "images": [ + "images/task_00062_96_1891_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00062", + "split": "eval", + "split_group_id": "task_00062__episode_000096", + "recording_id": null, + "episode_id": 96, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. approach\nC. release\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
approach" + } + ], + "images": [ + "images/task_00062_96_66_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00062", + "split": "eval", + "split_group_id": "task_00062__episode_000096", + "recording_id": null, + "episode_id": 96, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_00062_112_1555_camera_top.jpg", + "images/task_00062_112_1110_camera_top.jpg", + "images/task_00062_112_463_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00062", + "split": "eval", + "split_group_id": "task_00062__episode_000112", + "recording_id": null, + "episode_id": 112, + "camera": "camera_top", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_00062_112_865_camera_top.jpg", + "images/task_00062_112_1989_camera_top.jpg", + "images/task_00062_112_474_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00062", + "split": "eval", + "split_group_id": "task_00062__episode_000112", + "recording_id": null, + "episode_id": 112, + "camera": "camera_top", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nC. toward the left side of the scene\nA. toward the top of the scene\nB. toward the bottom of the scene\nD. 
toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. toward the left side of the scene" + } + ], + "images": [ + "images/task_00062_112_696_camera_top.jpg", + "images/task_00062_112_701_camera_top.jpg", + "images/task_00062_112_706_camera_top.jpg", + "images/task_00062_112_711_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00062", + "split": "eval", + "split_group_id": "task_00062__episode_000112", + "recording_id": null, + "episode_id": 112, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. approach\nC. pre-approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. contact" + } + ], + "images": [ + "images/task_00062_112_486_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00062", + "split": "eval", + "split_group_id": "task_00062__episode_000112", + "recording_id": null, + "episode_id": 112, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_00062_112_485_camera_top.jpg", + "images/task_00062_112_1031_camera_top.jpg", + "images/task_00062_112_689_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00062", + "split": "eval", + "split_group_id": "task_00062__episode_000112", + "recording_id": null, + "episode_id": 112, + "camera": "camera_top", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/task_00062_112_1165_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00062", + "split": "eval", + "split_group_id": "task_00062__episode_000112", + "recording_id": null, + "episode_id": 112, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_00062_112_1132_camera_top.jpg", + "images/task_00062_112_473_camera_top.jpg", + "images/task_00062_112_653_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00062", + "split": "eval", + "split_group_id": "task_00062__episode_000112", + "recording_id": null, + "episode_id": 112, + "camera": "camera_top", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. pre-approach\nC. release\nD. 
approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. pre-approach" + } + ], + "images": [ + "images/task_00062_112_401_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00062", + "split": "eval", + "split_group_id": "task_00062__episode_000112", + "recording_id": null, + "episode_id": 112, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00062_112_1006_camera_top.jpg", + "images/task_00062_112_1009_camera_top.jpg", + "images/task_00062_112_1012_camera_top.jpg", + "images/task_00062_112_1015_camera_top.jpg", + "images/task_00062_112_1018_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00062", + "split": "eval", + "split_group_id": "task_00062__episode_000112", + "recording_id": null, + "episode_id": 112, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_00062_112_1106_camera_top.jpg", + "images/task_00062_112_482_camera_top.jpg", + "images/task_00062_112_2110_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00062", + "split": "eval", + "split_group_id": "task_00062__episode_000112", + "recording_id": null, + "episode_id": 112, + "camera": "camera_top", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. release\nC. approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. release" + } + ], + "images": [ + "images/task_00062_117_2185_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00062", + "split": "eval", + "split_group_id": "task_00062__episode_000117", + "recording_id": null, + "episode_id": 117, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. transfer\nC. release\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. approach" + } + ], + "images": [ + "images/task_00062_117_202_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00062", + "split": "eval", + "split_group_id": "task_00062__episode_000117", + "recording_id": null, + "episode_id": 117, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. release\nC. transfer\nD. release\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "C. transfer" + } + ], + "images": [ + "images/task_00062_117_2047_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00062", + "split": "eval", + "split_group_id": "task_00062__episode_000117", + "recording_id": null, + "episode_id": 117, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the left side of the scene\nB. toward the bottom of the scene\nC. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. toward the right side of the scene" + } + ], + "images": [ + "images/task_00062_125_1456_camera_top.jpg", + "images/task_00062_125_1461_camera_top.jpg", + "images/task_00062_125_1466_camera_top.jpg", + "images/task_00062_125_1471_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00062", + "split": "eval", + "split_group_id": "task_00062__episode_000125", + "recording_id": null, + "episode_id": 125, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the right side of the scene\nA. toward the top of the scene\nC. toward the left side of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00063_15_278_camera_top.jpg", + "images/task_00063_15_283_camera_top.jpg", + "images/task_00063_15_288_camera_top.jpg", + "images/task_00063_15_293_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00063", + "split": "eval", + "split_group_id": "task_00063__episode_000015", + "recording_id": null, + "episode_id": 15, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_00063_29_193_camera_top.jpg", + "images/task_00063_29_253_camera_top.jpg", + "images/task_00063_29_161_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00063", + "split": "eval", + "split_group_id": "task_00063__episode_000029", + "recording_id": null, + "episode_id": 29, + "camera": "camera_top", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the left side of the scene\nA. toward the bottom of the scene\nC. toward the right side of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
toward the left side of the scene" + } + ], + "images": [ + "images/task_00063_58_249_camera_top.jpg", + "images/task_00063_58_254_camera_top.jpg", + "images/task_00063_58_259_camera_top.jpg", + "images/task_00063_58_264_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00063", + "split": "eval", + "split_group_id": "task_00063__episode_000058", + "recording_id": null, + "episode_id": 58, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_00063_61_379_camera_top.jpg", + "images/task_00063_61_411_camera_top.jpg", + "images/task_00063_61_476_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00063", + "split": "eval", + "split_group_id": "task_00063__episode_000061", + "recording_id": null, + "episode_id": 61, + "camera": "camera_top", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00063_74_453_camera_top.jpg", + "images/task_00063_74_456_camera_top.jpg", + "images/task_00063_74_459_camera_top.jpg", + "images/task_00063_74_462_camera_top.jpg", + "images/task_00063_74_465_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00063", + "split": "eval", + "split_group_id": "task_00063__episode_000074", + "recording_id": null, + "episode_id": 74, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. hold and carry\nC. contact\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. transfer" + } + ], + "images": [ + "images/task_00063_106_250_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00063", + "split": "eval", + "split_group_id": "task_00063__episode_000106", + "recording_id": null, + "episode_id": 106, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_00063_108_112_camera_top.jpg", + "images/task_00063_108_294_camera_top.jpg", + "images/task_00063_108_207_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00063", + "split": "eval", + "split_group_id": "task_00063__episode_000108", + "recording_id": null, + "episode_id": 108, + "camera": "camera_top", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. contact\nC. approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. hold and carry" + } + ], + "images": [ + "images/task_00064_20_244_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00064", + "split": "eval", + "split_group_id": "task_00064__episode_000020", + "recording_id": null, + "episode_id": 20, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. contact\nC. release\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. contact" + } + ], + "images": [ + "images/task_00064_20_779_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00064", + "split": "eval", + "split_group_id": "task_00064__episode_000020", + "recording_id": null, + "episode_id": 20, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nA. toward the bottom of the scene\nB. toward the top of the scene\nC. 
toward the left side of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. toward the bottom of the scene" + } + ], + "images": [ + "images/task_00064_33_197_camera_top.jpg", + "images/task_00064_33_202_camera_top.jpg", + "images/task_00064_33_207_camera_top.jpg", + "images/task_00064_33_212_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00064", + "split": "eval", + "split_group_id": "task_00064__episode_000033", + "recording_id": null, + "episode_id": 33, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. pre-approach\nC. approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. pre-approach" + } + ], + "images": [ + "images/task_00064_36_183_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00064", + "split": "eval", + "split_group_id": "task_00064__episode_000036", + "recording_id": null, + "episode_id": 36, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. transfer\nC. contact\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
pre-approach" + } + ], + "images": [ + "images/task_00064_50_116_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00064", + "split": "eval", + "split_group_id": "task_00064__episode_000050", + "recording_id": null, + "episode_id": 50, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_00064_53_884_camera_top.jpg", + "images/task_00064_53_1530_camera_top.jpg", + "images/task_00064_53_209_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00064", + "split": "eval", + "split_group_id": "task_00064__episode_000053", + "recording_id": null, + "episode_id": 53, + "camera": "camera_top", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_00064_53_1272_camera_top.jpg", + "images/task_00064_53_1822_camera_top.jpg", + "images/task_00064_53_183_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00064", + "split": "eval", + "split_group_id": "task_00064__episode_000053", + "recording_id": null, + "episode_id": 53, + "camera": "camera_top", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. 
Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_00064_53_1354_camera_top.jpg", + "images/task_00064_53_1425_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00064", + "split": "eval", + "split_group_id": "task_00064__episode_000053", + "recording_id": null, + "episode_id": 53, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00064_53_832_camera_top_t-6.jpg", + "images/task_00064_53_835_camera_top_t-3.jpg", + "images/task_00064_53_838_camera_top_t0.jpg", + "images/task_00064_53_841_camera_top_t+3.jpg", + "images/task_00064_53_844_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00064", + "split": "eval", + "split_group_id": "task_00064__episode_000053", + "recording_id": null, + "episode_id": 53, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00064_53_1124_camera_top_t-6.jpg", + "images/task_00064_53_1127_camera_top_t-3.jpg", + "images/task_00064_53_1130_camera_top_t0.jpg", + "images/task_00064_53_1133_camera_top_t+3.jpg", + "images/task_00064_53_1136_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00064", + "split": "eval", + "split_group_id": "task_00064__episode_000053", + "recording_id": null, + "episode_id": 53, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_00064_53_2144_camera_top.jpg", + "images/task_00064_53_1733_camera_top.jpg", + "images/task_00064_53_1765_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00064", + "split": "eval", + "split_group_id": "task_00064__episode_000053", + "recording_id": null, + "episode_id": 53, + "camera": "camera_top", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. hold and carry\nC. pre-approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
pre-approach" + } + ], + "images": [ + "images/task_00064_80_92_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00064", + "split": "eval", + "split_group_id": "task_00064__episode_000080", + "recording_id": null, + "episode_id": 80, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00064_87_315_camera_top.jpg", + "images/task_00064_87_318_camera_top.jpg", + "images/task_00064_87_321_camera_top.jpg", + "images/task_00064_87_324_camera_top.jpg", + "images/task_00064_87_327_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00064", + "split": "eval", + "split_group_id": "task_00064__episode_000087", + "recording_id": null, + "episode_id": 87, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nB. toward the top of the scene\nA. toward the bottom of the scene\nC. toward the left side of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
toward the top of the scene" + } + ], + "images": [ + "images/task_00064_120_1801_camera_top.jpg", + "images/task_00064_120_1806_camera_top.jpg", + "images/task_00064_120_1811_camera_top.jpg", + "images/task_00064_120_1816_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00064", + "split": "eval", + "split_group_id": "task_00064__episode_000120", + "recording_id": null, + "episode_id": 120, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nC. toward the bottom of the scene\nA. toward the right side of the scene\nB. toward the top of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. toward the bottom of the scene" + } + ], + "images": [ + "images/task_00064_157_275_camera_top.jpg", + "images/task_00064_157_280_camera_top.jpg", + "images/task_00064_157_285_camera_top.jpg", + "images/task_00064_157_290_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00064", + "split": "eval", + "split_group_id": "task_00064__episode_000157", + "recording_id": null, + "episode_id": 157, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00065_10_445_camera_top.jpg", + "images/task_00065_10_448_camera_top.jpg", + "images/task_00065_10_451_camera_top.jpg", + "images/task_00065_10_454_camera_top.jpg", + "images/task_00065_10_457_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00065", + "split": "eval", + "split_group_id": "task_00065__episode_000010", + "recording_id": null, + "episode_id": 10, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nA. toward the right side of the scene\nB. toward the bottom of the scene\nC. toward the left side of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. toward the right side of the scene" + } + ], + "images": [ + "images/task_00065_28_731_camera_top.jpg", + "images/task_00065_28_736_camera_top.jpg", + "images/task_00065_28_741_camera_top.jpg", + "images/task_00065_28_746_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00065", + "split": "eval", + "split_group_id": "task_00065__episode_000028", + "recording_id": null, + "episode_id": 28, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. transfer\nC. hold and carry\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
hold and carry" + } + ], + "images": [ + "images/task_00065_52_651_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00065", + "split": "eval", + "split_group_id": "task_00065__episode_000052", + "recording_id": null, + "episode_id": 52, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. hold and carry\nC. approach\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. pre-approach" + } + ], + "images": [ + "images/task_00065_65_349_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00065", + "split": "eval", + "split_group_id": "task_00065__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/task_00066_0_541_camera_top.jpg", + "images/task_00066_0_684_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00066", + "split": "eval", + "split_group_id": "task_00066__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. release\nC. transfer\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. transfer" + } + ], + "images": [ + "images/task_00066_0_734_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00066", + "split": "eval", + "split_group_id": "task_00066__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00066_0_247_camera_top.jpg", + "images/task_00066_0_250_camera_top.jpg", + "images/task_00066_0_253_camera_top.jpg", + "images/task_00066_0_256_camera_top.jpg", + "images/task_00066_0_259_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00066", + "split": "eval", + "split_group_id": "task_00066__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. both arms are idle" + } + ], + "images": [ + "images/task_00066_0_961_camera_top.jpg", + "images/task_00066_0_964_camera_top.jpg", + "images/task_00066_0_967_camera_top.jpg", + "images/task_00066_0_970_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00066", + "split": "eval", + "split_group_id": "task_00066__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
only the left arm is active" + } + ], + "images": [ + "images/task_00066_0_377_camera_top.jpg", + "images/task_00066_0_380_camera_top.jpg", + "images/task_00066_0_383_camera_top.jpg", + "images/task_00066_0_386_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00066", + "split": "eval", + "split_group_id": "task_00066__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. both arms are idle" + } + ], + "images": [ + "images/task_00066_0_164_camera_top.jpg", + "images/task_00066_0_167_camera_top.jpg", + "images/task_00066_0_170_camera_top.jpg", + "images/task_00066_0_173_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00066", + "split": "eval", + "split_group_id": "task_00066__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
only the left arm is active" + } + ], + "images": [ + "images/task_00066_0_376_camera_top.jpg", + "images/task_00066_0_379_camera_top.jpg", + "images/task_00066_0_382_camera_top.jpg", + "images/task_00066_0_385_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00066", + "split": "eval", + "split_group_id": "task_00066__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. only the right arm is active" + } + ], + "images": [ + "images/task_00066_0_109_camera_top.jpg", + "images/task_00066_0_112_camera_top.jpg", + "images/task_00066_0_115_camera_top.jpg", + "images/task_00066_0_118_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00066", + "split": "eval", + "split_group_id": "task_00066__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
both arms are idle" + } + ], + "images": [ + "images/task_00066_0_798_camera_top.jpg", + "images/task_00066_0_801_camera_top.jpg", + "images/task_00066_0_804_camera_top.jpg", + "images/task_00066_0_807_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00066", + "split": "eval", + "split_group_id": "task_00066__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00066_25_76_camera_top.jpg", + "images/task_00066_25_79_camera_top.jpg", + "images/task_00066_25_82_camera_top.jpg", + "images/task_00066_25_85_camera_top.jpg", + "images/task_00066_25_88_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00066", + "split": "eval", + "split_group_id": "task_00066__episode_000025", + "recording_id": null, + "episode_id": 25, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00066_47_451_camera_top.jpg", + "images/task_00066_47_454_camera_top.jpg", + "images/task_00066_47_457_camera_top.jpg", + "images/task_00066_47_460_camera_top.jpg", + "images/task_00066_47_463_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00066", + "split": "eval", + "split_group_id": "task_00066__episode_000047", + "recording_id": null, + "episode_id": 47, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the left gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/task_00066_78_170_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00066", + "split": "eval", + "split_group_id": "task_00066__episode_000078", + "recording_id": null, + "episode_id": 78, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/task_00066_78_375_camera_top.jpg", + "images/task_00066_78_197_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00066", + "split": "eval", + "split_group_id": "task_00066__episode_000078", + "recording_id": null, + "episode_id": 78, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. both arms are idle" + } + ], + "images": [ + "images/task_00066_78_600_camera_top.jpg", + "images/task_00066_78_603_camera_top.jpg", + "images/task_00066_78_606_camera_top.jpg", + "images/task_00066_78_609_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00066", + "split": "eval", + "split_group_id": "task_00066__episode_000078", + "recording_id": null, + "episode_id": 78, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
both arms are idle" + } + ], + "images": [ + "images/task_00066_78_204_camera_top.jpg", + "images/task_00066_78_207_camera_top.jpg", + "images/task_00066_78_210_camera_top.jpg", + "images/task_00066_78_213_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00066", + "split": "eval", + "split_group_id": "task_00066__episode_000078", + "recording_id": null, + "episode_id": 78, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. only the right arm is active" + } + ], + "images": [ + "images/task_00066_78_231_camera_top.jpg", + "images/task_00066_78_234_camera_top.jpg", + "images/task_00066_78_237_camera_top.jpg", + "images/task_00066_78_240_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00066", + "split": "eval", + "split_group_id": "task_00066__episode_000078", + "recording_id": null, + "episode_id": 78, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — no contact" + } + ], + "images": [ + "images/task_00066_78_162_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00066", + "split": "eval", + "split_group_id": "task_00066__episode_000078", + "recording_id": null, + "episode_id": 78, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00066_78_173_camera_top.jpg", + "images/task_00066_78_176_camera_top.jpg", + "images/task_00066_78_179_camera_top.jpg", + "images/task_00066_78_182_camera_top.jpg", + "images/task_00066_78_185_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00066", + "split": "eval", + "split_group_id": "task_00066__episode_000078", + "recording_id": null, + "episode_id": 78, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. release\nC. approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
approach" + } + ], + "images": [ + "images/task_00066_109_63_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00066", + "split": "eval", + "split_group_id": "task_00066__episode_000109", + "recording_id": null, + "episode_id": 109, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nD. toward the top of the scene\nA. toward the bottom of the scene\nB. toward the left side of the scene\nC. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. toward the top of the scene" + } + ], + "images": [ + "images/task_00067_2_790_camera_top.jpg", + "images/task_00067_2_795_camera_top.jpg", + "images/task_00067_2_800_camera_top.jpg", + "images/task_00067_2_805_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00067", + "split": "eval", + "split_group_id": "task_00067__episode_000002", + "recording_id": null, + "episode_id": 2, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nA. toward the top of the scene\nB. toward the bottom of the scene\nC. toward the left side of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
toward the top of the scene" + } + ], + "images": [ + "images/task_00067_5_1492_camera_top.jpg", + "images/task_00067_5_1497_camera_top.jpg", + "images/task_00067_5_1502_camera_top.jpg", + "images/task_00067_5_1507_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00067", + "split": "eval", + "split_group_id": "task_00067__episode_000005", + "recording_id": null, + "episode_id": 5, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. approach\nC. transfer\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. transfer" + } + ], + "images": [ + "images/task_00067_26_1756_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00067", + "split": "eval", + "split_group_id": "task_00067__episode_000026", + "recording_id": null, + "episode_id": 26, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. transfer\nC. pre-approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. release" + } + ], + "images": [ + "images/task_00067_39_1537_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00067", + "split": "eval", + "split_group_id": "task_00067__episode_000039", + "recording_id": null, + "episode_id": 39, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. release\nC. transfer\nD. 
contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. release" + } + ], + "images": [ + "images/task_00067_52_1074_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00067", + "split": "eval", + "split_group_id": "task_00067__episode_000052", + "recording_id": null, + "episode_id": 52, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. hold and carry\nC. approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. hold and carry" + } + ], + "images": [ + "images/task_00067_52_209_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00067", + "split": "eval", + "split_group_id": "task_00067__episode_000052", + "recording_id": null, + "episode_id": 52, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. approach\nC. transfer\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. approach" + } + ], + "images": [ + "images/task_00067_52_113_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00067", + "split": "eval", + "split_group_id": "task_00067__episode_000052", + "recording_id": null, + "episode_id": 52, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. 
Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00067_65_167_camera_top.jpg", + "images/task_00067_65_170_camera_top.jpg", + "images/task_00067_65_173_camera_top.jpg", + "images/task_00067_65_176_camera_top.jpg", + "images/task_00067_65_179_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00067", + "split": "eval", + "split_group_id": "task_00067__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_00067_86_1040_camera_top.jpg", + "images/task_00067_86_204_camera_top.jpg", + "images/task_00067_86_854_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00067", + "split": "eval", + "split_group_id": "task_00067__episode_000086", + "recording_id": null, + "episode_id": 86, + "camera": "camera_top", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nD. toward the left side of the scene\nA. toward the top of the scene\nB. toward the bottom of the scene\nC. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
toward the left side of the scene" + } + ], + "images": [ + "images/task_00067_107_1601_camera_top.jpg", + "images/task_00067_107_1606_camera_top.jpg", + "images/task_00067_107_1611_camera_top.jpg", + "images/task_00067_107_1616_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00067", + "split": "eval", + "split_group_id": "task_00067__episode_000107", + "recording_id": null, + "episode_id": 107, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the top of the scene\nB. toward the bottom of the scene\nC. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. toward the right side of the scene" + } + ], + "images": [ + "images/task_00067_109_352_camera_top.jpg", + "images/task_00067_109_357_camera_top.jpg", + "images/task_00067_109_362_camera_top.jpg", + "images/task_00067_109_367_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00067", + "split": "eval", + "split_group_id": "task_00067__episode_000109", + "recording_id": null, + "episode_id": 109, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nD. toward the bottom of the scene\nA. toward the top of the scene\nB. toward the left side of the scene\nC. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
toward the bottom of the scene" + } + ], + "images": [ + "images/task_00067_109_882_camera_top.jpg", + "images/task_00067_109_887_camera_top.jpg", + "images/task_00067_109_892_camera_top.jpg", + "images/task_00067_109_897_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00067", + "split": "eval", + "split_group_id": "task_00067__episode_000109", + "recording_id": null, + "episode_id": 109, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nC. toward the right side of the scene\nA. toward the left side of the scene\nB. toward the bottom of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. toward the right side of the scene" + } + ], + "images": [ + "images/task_00067_109_354_camera_top.jpg", + "images/task_00067_109_359_camera_top.jpg", + "images/task_00067_109_364_camera_top.jpg", + "images/task_00067_109_369_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00067", + "split": "eval", + "split_group_id": "task_00067__episode_000109", + "recording_id": null, + "episode_id": 109, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. approach\nC. pre-approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
pre-approach" + } + ], + "images": [ + "images/task_00067_109_105_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00067", + "split": "eval", + "split_group_id": "task_00067__episode_000109", + "recording_id": null, + "episode_id": 109, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nA. toward the top of the scene\nB. toward the left side of the scene\nC. toward the right side of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. toward the top of the scene" + } + ], + "images": [ + "images/task_00067_112_713_camera_top.jpg", + "images/task_00067_112_718_camera_top.jpg", + "images/task_00067_112_723_camera_top.jpg", + "images/task_00067_112_728_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00067", + "split": "eval", + "split_group_id": "task_00067__episode_000112", + "recording_id": null, + "episode_id": 112, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00067_122_409_camera_top.jpg", + "images/task_00067_122_412_camera_top.jpg", + "images/task_00067_122_415_camera_top.jpg", + "images/task_00067_122_418_camera_top.jpg", + "images/task_00067_122_421_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00067", + "split": "eval", + "split_group_id": "task_00067__episode_000122", + "recording_id": null, + "episode_id": 122, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the left side of the scene\nB. toward the bottom of the scene\nC. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. toward the right side of the scene" + } + ], + "images": [ + "images/task_00067_128_1703_camera_top.jpg", + "images/task_00067_128_1708_camera_top.jpg", + "images/task_00067_128_1713_camera_top.jpg", + "images/task_00067_128_1718_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00067", + "split": "eval", + "split_group_id": "task_00067__episode_000128", + "recording_id": null, + "episode_id": 128, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00068_10_262_camera_top.jpg", + "images/task_00068_10_265_camera_top.jpg", + "images/task_00068_10_268_camera_top.jpg", + "images/task_00068_10_271_camera_top.jpg", + "images/task_00068_10_274_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00068", + "split": "eval", + "split_group_id": "task_00068__episode_000010", + "recording_id": null, + "episode_id": 10, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00068_55_188_camera_top.jpg", + "images/task_00068_55_191_camera_top.jpg", + "images/task_00068_55_194_camera_top.jpg", + "images/task_00068_55_197_camera_top.jpg", + "images/task_00068_55_200_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00068", + "split": "eval", + "split_group_id": "task_00068__episode_000055", + "recording_id": null, + "episode_id": 55, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00068_58_338_camera_top.jpg", + "images/task_00068_58_341_camera_top.jpg", + "images/task_00068_58_344_camera_top.jpg", + "images/task_00068_58_347_camera_top.jpg", + "images/task_00068_58_350_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00068", + "split": "eval", + "split_group_id": "task_00068__episode_000058", + "recording_id": null, + "episode_id": 58, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00068_58_676_camera_top_t-6.jpg", + "images/task_00068_58_679_camera_top_t-3.jpg", + "images/task_00068_58_682_camera_top_t0.jpg", + "images/task_00068_58_685_camera_top_t+3.jpg", + "images/task_00068_58_688_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00068", + "split": "eval", + "split_group_id": "task_00068__episode_000058", + "recording_id": null, + "episode_id": 58, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. 
Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_00068_58_714_camera_top.jpg", + "images/task_00068_58_568_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00068", + "split": "eval", + "split_group_id": "task_00068__episode_000058", + "recording_id": null, + "episode_id": 58, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_00068_58_464_camera_top.jpg", + "images/task_00068_58_568_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00068", + "split": "eval", + "split_group_id": "task_00068__episode_000058", + "recording_id": null, + "episode_id": 58, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_00068_58_342_camera_top.jpg", + "images/task_00068_58_269_camera_top.jpg", + "images/task_00068_58_650_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00068", + "split": "eval", + "split_group_id": "task_00068__episode_000058", + "recording_id": null, + "episode_id": 58, + "camera": "camera_top", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the left side of the scene\nA. toward the right side of the scene\nC. toward the top of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. toward the left side of the scene" + } + ], + "images": [ + "images/task_00068_58_207_camera_top.jpg", + "images/task_00068_58_212_camera_top.jpg", + "images/task_00068_58_217_camera_top.jpg", + "images/task_00068_58_222_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00068", + "split": "eval", + "split_group_id": "task_00068__episode_000058", + "recording_id": null, + "episode_id": 58, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00068_58_392_camera_top.jpg", + "images/task_00068_58_395_camera_top.jpg", + "images/task_00068_58_398_camera_top.jpg", + "images/task_00068_58_401_camera_top.jpg", + "images/task_00068_58_404_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00068", + "split": "eval", + "split_group_id": "task_00068__episode_000058", + "recording_id": null, + "episode_id": 58, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00068_76_303_camera_top.jpg", + "images/task_00068_76_306_camera_top.jpg", + "images/task_00068_76_309_camera_top.jpg", + "images/task_00068_76_312_camera_top.jpg", + "images/task_00068_76_315_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00068", + "split": "eval", + "split_group_id": "task_00068__episode_000076", + "recording_id": null, + "episode_id": 76, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nB. toward the left side of the scene\nA. toward the right side of the scene\nC. toward the top of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
toward the left side of the scene" + } + ], + "images": [ + "images/task_00068_106_726_camera_top.jpg", + "images/task_00068_106_731_camera_top.jpg", + "images/task_00068_106_736_camera_top.jpg", + "images/task_00068_106_741_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00068", + "split": "eval", + "split_group_id": "task_00068__episode_000106", + "recording_id": null, + "episode_id": 106, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00069_68_1166_camera_top.jpg", + "images/task_00069_68_1169_camera_top.jpg", + "images/task_00069_68_1172_camera_top.jpg", + "images/task_00069_68_1175_camera_top.jpg", + "images/task_00069_68_1178_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00069", + "split": "eval", + "split_group_id": "task_00069__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_00069_68_977_camera_top.jpg", + "images/task_00069_68_1219_camera_top.jpg", + "images/task_00069_68_2314_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00069", + "split": "eval", + "split_group_id": "task_00069__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00069_68_824_camera_top.jpg", + "images/task_00069_68_827_camera_top.jpg", + "images/task_00069_68_830_camera_top.jpg", + "images/task_00069_68_833_camera_top.jpg", + "images/task_00069_68_836_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00069", + "split": "eval", + "split_group_id": "task_00069__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_00069_68_1632_camera_top.jpg", + "images/task_00069_68_1723_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00069", + "split": "eval", + "split_group_id": "task_00069__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00069_68_1702_camera_top.jpg", + "images/task_00069_68_1705_camera_top.jpg", + "images/task_00069_68_1708_camera_top.jpg", + "images/task_00069_68_1711_camera_top.jpg", + "images/task_00069_68_1714_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00069", + "split": "eval", + "split_group_id": "task_00069__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nA. toward the top of the scene\nB. toward the right side of the scene\nC. toward the bottom of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
toward the top of the scene" + } + ], + "images": [ + "images/task_00069_68_2617_camera_top.jpg", + "images/task_00069_68_2622_camera_top.jpg", + "images/task_00069_68_2627_camera_top.jpg", + "images/task_00069_68_2632_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00069", + "split": "eval", + "split_group_id": "task_00069__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_00069_68_2315_camera_top.jpg", + "images/task_00069_68_1847_camera_top.jpg", + "images/task_00069_68_327_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00069", + "split": "eval", + "split_group_id": "task_00069__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. contact\nC. pre-approach\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. pre-approach" + } + ], + "images": [ + "images/task_00069_68_259_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00069", + "split": "eval", + "split_group_id": "task_00069__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. contact\nC. transfer\nD. 
pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. pre-approach" + } + ], + "images": [ + "images/task_00069_94_60_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00069", + "split": "eval", + "split_group_id": "task_00069__episode_000094", + "recording_id": null, + "episode_id": 94, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nA. toward the bottom of the scene\nB. toward the left side of the scene\nC. toward the right side of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. toward the bottom of the scene" + } + ], + "images": [ + "images/task_00069_129_237_camera_top.jpg", + "images/task_00069_129_242_camera_top.jpg", + "images/task_00069_129_247_camera_top.jpg", + "images/task_00069_129_252_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00069", + "split": "eval", + "split_group_id": "task_00069__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nB. toward the right side of the scene\nA. toward the top of the scene\nC. toward the left side of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00069_132_323_camera_top.jpg", + "images/task_00069_132_328_camera_top.jpg", + "images/task_00069_132_333_camera_top.jpg", + "images/task_00069_132_338_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00069", + "split": "eval", + "split_group_id": "task_00069__episode_000132", + "recording_id": null, + "episode_id": 132, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nB. toward the top of the scene\nA. toward the right side of the scene\nC. toward the left side of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. toward the top of the scene" + } + ], + "images": [ + "images/task_00069_144_623_camera_top.jpg", + "images/task_00069_144_628_camera_top.jpg", + "images/task_00069_144_633_camera_top.jpg", + "images/task_00069_144_638_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00069", + "split": "eval", + "split_group_id": "task_00069__episode_000144", + "recording_id": null, + "episode_id": 144, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the bottom of the scene\nB. toward the top of the scene\nC. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00069_144_396_camera_top.jpg", + "images/task_00069_144_401_camera_top.jpg", + "images/task_00069_144_406_camera_top.jpg", + "images/task_00069_144_411_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00069", + "split": "eval", + "split_group_id": "task_00069__episode_000144", + "recording_id": null, + "episode_id": 144, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nC. toward the bottom of the scene\nA. toward the left side of the scene\nB. toward the right side of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. toward the bottom of the scene" + } + ], + "images": [ + "images/task_00069_144_360_camera_top.jpg", + "images/task_00069_144_365_camera_top.jpg", + "images/task_00069_144_370_camera_top.jpg", + "images/task_00069_144_375_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00069", + "split": "eval", + "split_group_id": "task_00069__episode_000144", + "recording_id": null, + "episode_id": 144, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. contact\nC. pre-approach\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
hold and carry" + } + ], + "images": [ + "images/task_00069_144_189_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00069", + "split": "eval", + "split_group_id": "task_00069__episode_000144", + "recording_id": null, + "episode_id": 144, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00069_144_345_camera_top_t-6.jpg", + "images/task_00069_144_348_camera_top_t-3.jpg", + "images/task_00069_144_351_camera_top_t0.jpg", + "images/task_00069_144_354_camera_top_t+3.jpg", + "images/task_00069_144_357_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00069", + "split": "eval", + "split_group_id": "task_00069__episode_000144", + "recording_id": null, + "episode_id": 144, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nC. toward the bottom of the scene\nA. toward the left side of the scene\nB. toward the top of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
toward the bottom of the scene" + } + ], + "images": [ + "images/task_00069_144_436_camera_top.jpg", + "images/task_00069_144_441_camera_top.jpg", + "images/task_00069_144_446_camera_top.jpg", + "images/task_00069_144_451_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00069", + "split": "eval", + "split_group_id": "task_00069__episode_000144", + "recording_id": null, + "episode_id": 144, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. approach\nC. transfer\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. hold and carry" + } + ], + "images": [ + "images/task_00069_178_294_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00069", + "split": "eval", + "split_group_id": "task_00069__episode_000178", + "recording_id": null, + "episode_id": 178, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. hold and carry\nC. approach\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. approach" + } + ], + "images": [ + "images/task_00070_13_420_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00070", + "split": "eval", + "split_group_id": "task_00070__episode_000013", + "recording_id": null, + "episode_id": 13, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00070_34_211_camera_top.jpg", + "images/task_00070_34_214_camera_top.jpg", + "images/task_00070_34_217_camera_top.jpg", + "images/task_00070_34_220_camera_top.jpg", + "images/task_00070_34_223_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00070", + "split": "eval", + "split_group_id": "task_00070__episode_000034", + "recording_id": null, + "episode_id": 34, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the right side of the scene\nA. toward the left side of the scene\nC. toward the top of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. toward the right side of the scene" + } + ], + "images": [ + "images/task_00070_58_274_camera_top.jpg", + "images/task_00070_58_279_camera_top.jpg", + "images/task_00070_58_284_camera_top.jpg", + "images/task_00070_58_289_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00070", + "split": "eval", + "split_group_id": "task_00070__episode_000058", + "recording_id": null, + "episode_id": 58, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. 
Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00070_74_318_camera_top.jpg", + "images/task_00070_74_321_camera_top.jpg", + "images/task_00070_74_324_camera_top.jpg", + "images/task_00070_74_327_camera_top.jpg", + "images/task_00070_74_330_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00070", + "split": "eval", + "split_group_id": "task_00070__episode_000074", + "recording_id": null, + "episode_id": 74, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_00070_82_377_camera_top.jpg", + "images/task_00070_82_268_camera_top.jpg", + "images/task_00070_82_158_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00070", + "split": "eval", + "split_group_id": "task_00070__episode_000082", + "recording_id": null, + "episode_id": 82, + "camera": "camera_top", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. approach\nC. hold and carry\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
transfer" + } + ], + "images": [ + "images/task_00070_90_244_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00070", + "split": "eval", + "split_group_id": "task_00070__episode_000090", + "recording_id": null, + "episode_id": 90, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00070_111_71_camera_top.jpg", + "images/task_00070_111_74_camera_top.jpg", + "images/task_00070_111_77_camera_top.jpg", + "images/task_00070_111_80_camera_top.jpg", + "images/task_00070_111_83_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00070", + "split": "eval", + "split_group_id": "task_00070__episode_000111", + "recording_id": null, + "episode_id": 111, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00071_0_205_camera_top.jpg", + "images/task_00071_0_208_camera_top.jpg", + "images/task_00071_0_211_camera_top.jpg", + "images/task_00071_0_214_camera_top.jpg", + "images/task_00071_0_217_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00071", + "split": "eval", + "split_group_id": "task_00071__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00071_7_171_camera_top.jpg", + "images/task_00071_7_174_camera_top.jpg", + "images/task_00071_7_177_camera_top.jpg", + "images/task_00071_7_180_camera_top.jpg", + "images/task_00071_7_183_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00071", + "split": "eval", + "split_group_id": "task_00071__episode_000007", + "recording_id": null, + "episode_id": 7, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00071_12_173_camera_top.jpg", + "images/task_00071_12_176_camera_top.jpg", + "images/task_00071_12_179_camera_top.jpg", + "images/task_00071_12_182_camera_top.jpg", + "images/task_00071_12_185_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00071", + "split": "eval", + "split_group_id": "task_00071__episode_000012", + "recording_id": null, + "episode_id": 12, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. release\nC. hold and carry\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. hold and carry" + } + ], + "images": [ + "images/task_00071_28_160_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00071", + "split": "eval", + "split_group_id": "task_00071__episode_000028", + "recording_id": null, + "episode_id": 28, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00071_43_155_camera_top.jpg", + "images/task_00071_43_158_camera_top.jpg", + "images/task_00071_43_161_camera_top.jpg", + "images/task_00071_43_164_camera_top.jpg", + "images/task_00071_43_167_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00071", + "split": "eval", + "split_group_id": "task_00071__episode_000043", + "recording_id": null, + "episode_id": 43, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. release\nC. hold and carry\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. contact" + } + ], + "images": [ + "images/task_00071_45_95_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00071", + "split": "eval", + "split_group_id": "task_00071__episode_000045", + "recording_id": null, + "episode_id": 45, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nC. toward the top of the scene\nA. toward the bottom of the scene\nB. toward the left side of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
toward the top of the scene" + } + ], + "images": [ + "images/task_00071_53_334_camera_top.jpg", + "images/task_00071_53_339_camera_top.jpg", + "images/task_00071_53_344_camera_top.jpg", + "images/task_00071_53_349_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00071", + "split": "eval", + "split_group_id": "task_00071__episode_000053", + "recording_id": null, + "episode_id": 53, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_00071_68_690_camera_top.jpg", + "images/task_00071_68_304_camera_top.jpg", + "images/task_00071_68_1636_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00071", + "split": "eval", + "split_group_id": "task_00071__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_00071_81_293_camera_top.jpg", + "images/task_00071_81_830_camera_top.jpg", + "images/task_00071_81_532_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00071", + "split": "eval", + "split_group_id": "task_00071__episode_000081", + "recording_id": null, + "episode_id": 81, + "camera": "camera_top", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_00071_81_837_camera_top.jpg", + "images/task_00071_81_277_camera_top.jpg", + "images/task_00071_81_474_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00071", + "split": "eval", + "split_group_id": "task_00071__episode_000081", + "recording_id": null, + "episode_id": 81, + "camera": "camera_top", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00071_81_651_camera_top_t-6.jpg", + "images/task_00071_81_654_camera_top_t-3.jpg", + "images/task_00071_81_657_camera_top_t0.jpg", + "images/task_00071_81_660_camera_top_t+3.jpg", + "images/task_00071_81_663_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00071", + "split": "eval", + "split_group_id": "task_00071__episode_000081", + "recording_id": null, + "episode_id": 81, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. pre-approach\nC. hold and carry\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
pre-approach" + } + ], + "images": [ + "images/task_00071_81_258_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00071", + "split": "eval", + "split_group_id": "task_00071__episode_000081", + "recording_id": null, + "episode_id": 81, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. contact\nC. approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. contact" + } + ], + "images": [ + "images/task_00071_81_349_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00071", + "split": "eval", + "split_group_id": "task_00071__episode_000081", + "recording_id": null, + "episode_id": 81, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_00071_81_350_camera_top.jpg", + "images/task_00071_81_405_camera_top.jpg", + "images/task_00071_81_600_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00071", + "split": "eval", + "split_group_id": "task_00071__episode_000081", + "recording_id": null, + "episode_id": 81, + "camera": "camera_top", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. 
Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_00071_81_651_camera_top.jpg", + "images/task_00071_81_413_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00071", + "split": "eval", + "split_group_id": "task_00071__episode_000081", + "recording_id": null, + "episode_id": 81, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. contact\nC. approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. release" + } + ], + "images": [ + "images/task_00072_7_575_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00072", + "split": "eval", + "split_group_id": "task_00072__episode_000007", + "recording_id": null, + "episode_id": 7, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. release\nC. hold and carry\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. transfer" + } + ], + "images": [ + "images/task_00072_7_103_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00072", + "split": "eval", + "split_group_id": "task_00072__episode_000007", + "recording_id": null, + "episode_id": 7, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00072_7_192_camera_top.jpg", + "images/task_00072_7_195_camera_top.jpg", + "images/task_00072_7_198_camera_top.jpg", + "images/task_00072_7_201_camera_top.jpg", + "images/task_00072_7_204_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00072", + "split": "eval", + "split_group_id": "task_00072__episode_000007", + "recording_id": null, + "episode_id": 7, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nA. toward the left side of the scene\nB. toward the bottom of the scene\nC. toward the right side of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. toward the left side of the scene" + } + ], + "images": [ + "images/task_00072_7_192_camera_top.jpg", + "images/task_00072_7_197_camera_top.jpg", + "images/task_00072_7_202_camera_top.jpg", + "images/task_00072_7_207_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00072", + "split": "eval", + "split_group_id": "task_00072__episode_000007", + "recording_id": null, + "episode_id": 7, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. approach\nC. release\nD. transfer\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "C. release" + } + ], + "images": [ + "images/task_00072_7_603_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00072", + "split": "eval", + "split_group_id": "task_00072__episode_000007", + "recording_id": null, + "episode_id": 7, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_00072_7_183_camera_top.jpg", + "images/task_00072_7_350_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00072", + "split": "eval", + "split_group_id": "task_00072__episode_000007", + "recording_id": null, + "episode_id": 7, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/task_00072_7_143_camera_top.jpg", + "images/task_00072_7_321_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00072", + "split": "eval", + "split_group_id": "task_00072__episode_000007", + "recording_id": null, + "episode_id": 7, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the top of the scene\nB. toward the bottom of the scene\nC. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. toward the right side of the scene" + } + ], + "images": [ + "images/task_00072_7_501_camera_top.jpg", + "images/task_00072_7_506_camera_top.jpg", + "images/task_00072_7_511_camera_top.jpg", + "images/task_00072_7_516_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00072", + "split": "eval", + "split_group_id": "task_00072__episode_000007", + "recording_id": null, + "episode_id": 7, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_00072_7_499_camera_top.jpg", + "images/task_00072_7_321_camera_top.jpg", + "images/task_00072_7_387_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00072", + "split": "eval", + "split_group_id": "task_00072__episode_000007", + "recording_id": null, + "episode_id": 7, + "camera": "camera_top", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nA. toward the right side of the scene\nB. toward the bottom of the scene\nC. toward the left side of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. toward the right side of the scene" + } + ], + "images": [ + "images/task_00072_15_573_camera_top.jpg", + "images/task_00072_15_578_camera_top.jpg", + "images/task_00072_15_583_camera_top.jpg", + "images/task_00072_15_588_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00072", + "split": "eval", + "split_group_id": "task_00072__episode_000015", + "recording_id": null, + "episode_id": 15, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nC. toward the left side of the scene\nA. toward the top of the scene\nB. toward the bottom of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
toward the left side of the scene" + } + ], + "images": [ + "images/task_00072_58_159_camera_top.jpg", + "images/task_00072_58_164_camera_top.jpg", + "images/task_00072_58_169_camera_top.jpg", + "images/task_00072_58_174_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00072", + "split": "eval", + "split_group_id": "task_00072__episode_000058", + "recording_id": null, + "episode_id": 58, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_00072_58_121_camera_top.jpg", + "images/task_00072_58_224_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00072", + "split": "eval", + "split_group_id": "task_00072__episode_000058", + "recording_id": null, + "episode_id": 58, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — no contact" + } + ], + "images": [ + "images/task_00072_58_383_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00072", + "split": "eval", + "split_group_id": "task_00072__episode_000058", + "recording_id": null, + "episode_id": 58, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_00072_58_125_camera_top.jpg", + "images/task_00072_58_166_camera_top.jpg", + "images/task_00072_58_453_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00072", + "split": "eval", + "split_group_id": "task_00072__episode_000058", + "recording_id": null, + "episode_id": 58, + "camera": "camera_top", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/task_00072_58_58_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00072", + "split": "eval", + "split_group_id": "task_00072__episode_000058", + "recording_id": null, + "episode_id": 58, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — contact established" + } + ], + "images": [ + "images/task_00072_58_290_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00072", + "split": "eval", + "split_group_id": "task_00072__episode_000058", + "recording_id": null, + "episode_id": 58, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_00072_58_290_camera_top.jpg", + "images/task_00072_58_418_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00072", + "split": "eval", + "split_group_id": "task_00072__episode_000058", + "recording_id": null, + "episode_id": 58, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. approach\nC. hold and carry\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
hold and carry" + } + ], + "images": [ + "images/task_00072_61_387_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00072", + "split": "eval", + "split_group_id": "task_00072__episode_000061", + "recording_id": null, + "episode_id": 61, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_00072_66_327_camera_top.jpg", + "images/task_00072_66_424_camera_top.jpg", + "images/task_00072_66_109_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00072", + "split": "eval", + "split_group_id": "task_00072__episode_000066", + "recording_id": null, + "episode_id": 66, + "camera": "camera_top", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. release\nC. hold and carry\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. transfer" + } + ], + "images": [ + "images/task_00072_82_217_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00072", + "split": "eval", + "split_group_id": "task_00072__episode_000082", + "recording_id": null, + "episode_id": 82, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nB. toward the bottom of the scene\nA. toward the left side of the scene\nC. toward the top of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "B. toward the bottom of the scene" + } + ], + "images": [ + "images/task_00072_98_301_camera_top.jpg", + "images/task_00072_98_306_camera_top.jpg", + "images/task_00072_98_311_camera_top.jpg", + "images/task_00072_98_316_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00072", + "split": "eval", + "split_group_id": "task_00072__episode_000098", + "recording_id": null, + "episode_id": 98, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nC. toward the left side of the scene\nA. toward the right side of the scene\nB. toward the top of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. toward the left side of the scene" + } + ], + "images": [ + "images/task_00073_5_234_camera_top.jpg", + "images/task_00073_5_239_camera_top.jpg", + "images/task_00073_5_244_camera_top.jpg", + "images/task_00073_5_249_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00073", + "split": "eval", + "split_group_id": "task_00073__episode_000005", + "recording_id": null, + "episode_id": 5, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00073_22_468_camera_top.jpg", + "images/task_00073_22_471_camera_top.jpg", + "images/task_00073_22_474_camera_top.jpg", + "images/task_00073_22_477_camera_top.jpg", + "images/task_00073_22_480_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00073", + "split": "eval", + "split_group_id": "task_00073__episode_000022", + "recording_id": null, + "episode_id": 22, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00073_33_637_camera_top.jpg", + "images/task_00073_33_640_camera_top.jpg", + "images/task_00073_33_643_camera_top.jpg", + "images/task_00073_33_646_camera_top.jpg", + "images/task_00073_33_649_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00073", + "split": "eval", + "split_group_id": "task_00073__episode_000033", + "recording_id": null, + "episode_id": 33, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nA. toward the right side of the scene\nB. toward the left side of the scene\nC. toward the bottom of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00073_55_230_camera_top.jpg", + "images/task_00073_55_235_camera_top.jpg", + "images/task_00073_55_240_camera_top.jpg", + "images/task_00073_55_245_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00073", + "split": "eval", + "split_group_id": "task_00073__episode_000055", + "recording_id": null, + "episode_id": 55, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. approach\nC. pre-approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. hold and carry" + } + ], + "images": [ + "images/task_00073_58_255_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00073", + "split": "eval", + "split_group_id": "task_00073__episode_000058", + "recording_id": null, + "episode_id": 58, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the left side of the scene\nB. toward the bottom of the scene\nC. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00073_83_2015_camera_top.jpg", + "images/task_00073_83_2020_camera_top.jpg", + "images/task_00073_83_2025_camera_top.jpg", + "images/task_00073_83_2030_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00073", + "split": "eval", + "split_group_id": "task_00073__episode_000083", + "recording_id": null, + "episode_id": 83, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_00073_83_774_camera_top.jpg", + "images/task_00073_83_234_camera_top.jpg", + "images/task_00073_83_1222_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00073", + "split": "eval", + "split_group_id": "task_00073__episode_000083", + "recording_id": null, + "episode_id": 83, + "camera": "camera_top", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00073_83_944_camera_top.jpg", + "images/task_00073_83_947_camera_top.jpg", + "images/task_00073_83_950_camera_top.jpg", + "images/task_00073_83_953_camera_top.jpg", + "images/task_00073_83_956_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00073", + "split": "eval", + "split_group_id": "task_00073__episode_000083", + "recording_id": null, + "episode_id": 83, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. approach\nC. contact\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. hold and carry" + } + ], + "images": [ + "images/task_00073_83_891_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00073", + "split": "eval", + "split_group_id": "task_00073__episode_000083", + "recording_id": null, + "episode_id": 83, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/task_00073_83_1963_camera_top.jpg", + "images/task_00073_83_2109_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00073", + "split": "eval", + "split_group_id": "task_00073__episode_000083", + "recording_id": null, + "episode_id": 83, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_00073_83_392_camera_top.jpg", + "images/task_00073_83_218_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00073", + "split": "eval", + "split_group_id": "task_00073__episode_000083", + "recording_id": null, + "episode_id": 83, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00073_83_254_camera_top_t-6.jpg", + "images/task_00073_83_257_camera_top_t-3.jpg", + "images/task_00073_83_260_camera_top_t0.jpg", + "images/task_00073_83_263_camera_top_t+3.jpg", + "images/task_00073_83_266_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00073", + "split": "eval", + "split_group_id": "task_00073__episode_000083", + "recording_id": null, + "episode_id": 83, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the right side of the scene\nA. toward the top of the scene\nC. toward the bottom of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. toward the right side of the scene" + } + ], + "images": [ + "images/task_00073_127_154_camera_top.jpg", + "images/task_00073_127_159_camera_top.jpg", + "images/task_00073_127_164_camera_top.jpg", + "images/task_00073_127_169_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00073", + "split": "eval", + "split_group_id": "task_00073__episode_000127", + "recording_id": null, + "episode_id": 127, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00073_136_1008_camera_top.jpg", + "images/task_00073_136_1011_camera_top.jpg", + "images/task_00073_136_1014_camera_top.jpg", + "images/task_00073_136_1017_camera_top.jpg", + "images/task_00073_136_1020_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00073", + "split": "eval", + "split_group_id": "task_00073__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/task_00074_58_753_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00074", + "split": "eval", + "split_group_id": "task_00074__episode_000058", + "recording_id": null, + "episode_id": 58, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/task_00074_58_203_camera_top.jpg", + "images/task_00074_58_408_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00074", + "split": "eval", + "split_group_id": "task_00074__episode_000058", + "recording_id": null, + "episode_id": 58, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. hold and carry\nC. release\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. hold and carry" + } + ], + "images": [ + "images/task_00074_58_540_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00074", + "split": "eval", + "split_group_id": "task_00074__episode_000058", + "recording_id": null, + "episode_id": 58, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nA. toward the left side of the scene\nB. toward the top of the scene\nC. toward the right side of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
toward the left side of the scene" + } + ], + "images": [ + "images/task_00074_58_1611_camera_top.jpg", + "images/task_00074_58_1616_camera_top.jpg", + "images/task_00074_58_1621_camera_top.jpg", + "images/task_00074_58_1626_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00074", + "split": "eval", + "split_group_id": "task_00074__episode_000058", + "recording_id": null, + "episode_id": 58, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. only the right arm is active" + } + ], + "images": [ + "images/task_00074_58_1682_camera_top.jpg", + "images/task_00074_58_1685_camera_top.jpg", + "images/task_00074_58_1688_camera_top.jpg", + "images/task_00074_58_1691_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00074", + "split": "eval", + "split_group_id": "task_00074__episode_000058", + "recording_id": null, + "episode_id": 58, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. release\nC. approach\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
pre-approach" + } + ], + "images": [ + "images/task_00074_58_171_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00074", + "split": "eval", + "split_group_id": "task_00074__episode_000058", + "recording_id": null, + "episode_id": 58, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. both arms are idle" + } + ], + "images": [ + "images/task_00074_58_125_camera_top.jpg", + "images/task_00074_58_128_camera_top.jpg", + "images/task_00074_58_131_camera_top.jpg", + "images/task_00074_58_134_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00074", + "split": "eval", + "split_group_id": "task_00074__episode_000058", + "recording_id": null, + "episode_id": 58, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00074_58_993_camera_top_t-6.jpg", + "images/task_00074_58_996_camera_top_t-3.jpg", + "images/task_00074_58_999_camera_top_t0.jpg", + "images/task_00074_58_1002_camera_top_t+3.jpg", + "images/task_00074_58_1005_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00074", + "split": "eval", + "split_group_id": "task_00074__episode_000058", + "recording_id": null, + "episode_id": 58, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nC. toward the right side of the scene\nA. toward the top of the scene\nB. toward the left side of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. toward the right side of the scene" + } + ], + "images": [ + "images/task_00074_58_646_camera_top.jpg", + "images/task_00074_58_651_camera_top.jpg", + "images/task_00074_58_656_camera_top.jpg", + "images/task_00074_58_661_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00074", + "split": "eval", + "split_group_id": "task_00074__episode_000058", + "recording_id": null, + "episode_id": 58, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_00075_55_1213_camera_top.jpg", + "images/task_00075_55_484_camera_top.jpg", + "images/task_00075_55_1152_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00075", + "split": "eval", + "split_group_id": "task_00075__episode_000055", + "recording_id": null, + "episode_id": 55, + "camera": "camera_top", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. both arms are idle" + } + ], + "images": [ + "images/task_00075_55_192_camera_top.jpg", + "images/task_00075_55_195_camera_top.jpg", + "images/task_00075_55_198_camera_top.jpg", + "images/task_00075_55_201_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00075", + "split": "eval", + "split_group_id": "task_00075__episode_000055", + "recording_id": null, + "episode_id": 55, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00075_55_1131_camera_top_t-6.jpg", + "images/task_00075_55_1134_camera_top_t-3.jpg", + "images/task_00075_55_1137_camera_top_t0.jpg", + "images/task_00075_55_1140_camera_top_t+3.jpg", + "images/task_00075_55_1143_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00075", + "split": "eval", + "split_group_id": "task_00075__episode_000055", + "recording_id": null, + "episode_id": 55, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. only the right arm is active" + } + ], + "images": [ + "images/task_00075_55_651_camera_top.jpg", + "images/task_00075_55_654_camera_top.jpg", + "images/task_00075_55_657_camera_top.jpg", + "images/task_00075_55_660_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00075", + "split": "eval", + "split_group_id": "task_00075__episode_000055", + "recording_id": null, + "episode_id": 55, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nA. toward the left side of the scene\nB. toward the top of the scene\nC. toward the right side of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
toward the left side of the scene" + } + ], + "images": [ + "images/task_00075_55_388_camera_top.jpg", + "images/task_00075_55_393_camera_top.jpg", + "images/task_00075_55_398_camera_top.jpg", + "images/task_00075_55_403_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00075", + "split": "eval", + "split_group_id": "task_00075__episode_000055", + "recording_id": null, + "episode_id": 55, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00075_55_1002_camera_top.jpg", + "images/task_00075_55_1005_camera_top.jpg", + "images/task_00075_55_1008_camera_top.jpg", + "images/task_00075_55_1011_camera_top.jpg", + "images/task_00075_55_1014_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00075", + "split": "eval", + "split_group_id": "task_00075__episode_000055", + "recording_id": null, + "episode_id": 55, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_00075_55_226_camera_top.jpg", + "images/task_00075_55_333_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00075", + "split": "eval", + "split_group_id": "task_00075__episode_000055", + "recording_id": null, + "episode_id": 55, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00075_55_592_camera_top.jpg", + "images/task_00075_55_595_camera_top.jpg", + "images/task_00075_55_598_camera_top.jpg", + "images/task_00075_55_601_camera_top.jpg", + "images/task_00075_55_604_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00075", + "split": "eval", + "split_group_id": "task_00075__episode_000055", + "recording_id": null, + "episode_id": 55, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00075_55_132_camera_top.jpg", + "images/task_00075_55_135_camera_top.jpg", + "images/task_00075_55_138_camera_top.jpg", + "images/task_00075_55_141_camera_top.jpg", + "images/task_00075_55_144_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00075", + "split": "eval", + "split_group_id": "task_00075__episode_000055", + "recording_id": null, + "episode_id": 55, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. pre-approach\nC. transfer\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. release" + } + ], + "images": [ + "images/task_00075_193_709_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00075", + "split": "eval", + "split_group_id": "task_00075__episode_000193", + "recording_id": null, + "episode_id": 193, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_00075_233_749_camera_top.jpg", + "images/task_00075_233_310_camera_top.jpg", + "images/task_00075_233_1426_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00075", + "split": "eval", + "split_group_id": "task_00075__episode_000233", + "recording_id": null, + "episode_id": 233, + "camera": "camera_top", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nC. toward the left side of the scene\nA. toward the right side of the scene\nB. toward the top of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. toward the left side of the scene" + } + ], + "images": [ + "images/task_00076_18_106_camera_top.jpg", + "images/task_00076_18_111_camera_top.jpg", + "images/task_00076_18_116_camera_top.jpg", + "images/task_00076_18_121_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00076", + "split": "eval", + "split_group_id": "task_00076__episode_000018", + "recording_id": null, + "episode_id": 18, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. pre-approach\nC. approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
release" + } + ], + "images": [ + "images/task_00076_20_690_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00076", + "split": "eval", + "split_group_id": "task_00076__episode_000020", + "recording_id": null, + "episode_id": 20, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. release\nC. approach\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. release" + } + ], + "images": [ + "images/task_00076_31_593_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00076", + "split": "eval", + "split_group_id": "task_00076__episode_000031", + "recording_id": null, + "episode_id": 31, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. transfer\nC. release\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. release" + } + ], + "images": [ + "images/task_00076_38_544_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00076", + "split": "eval", + "split_group_id": "task_00076__episode_000038", + "recording_id": null, + "episode_id": 38, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. release\nC. approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
release" + } + ], + "images": [ + "images/task_00076_80_356_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00076", + "split": "eval", + "split_group_id": "task_00076__episode_000080", + "recording_id": null, + "episode_id": 80, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. release\nC. approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. approach" + } + ], + "images": [ + "images/task_00076_95_106_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00076", + "split": "eval", + "split_group_id": "task_00076__episode_000095", + "recording_id": null, + "episode_id": 95, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nA. toward the bottom of the scene\nB. toward the right side of the scene\nC. toward the top of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
toward the bottom of the scene" + } + ], + "images": [ + "images/task_00076_119_332_camera_top.jpg", + "images/task_00076_119_337_camera_top.jpg", + "images/task_00076_119_342_camera_top.jpg", + "images/task_00076_119_347_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00076", + "split": "eval", + "split_group_id": "task_00076__episode_000119", + "recording_id": null, + "episode_id": 119, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_00077_14_523_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00077", + "split": "eval", + "split_group_id": "task_00077__episode_000014", + "recording_id": null, + "episode_id": 14, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/task_00077_14_261_camera_top.jpg", + "images/task_00077_14_342_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00077", + "split": "eval", + "split_group_id": "task_00077__episode_000014", + "recording_id": null, + "episode_id": 14, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_00077_14_326_camera_top.jpg", + "images/task_00077_14_191_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00077", + "split": "eval", + "split_group_id": "task_00077__episode_000014", + "recording_id": null, + "episode_id": 14, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. release\nC. transfer\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
transfer" + } + ], + "images": [ + "images/task_00077_47_328_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00077", + "split": "eval", + "split_group_id": "task_00077__episode_000047", + "recording_id": null, + "episode_id": 47, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the left side of the scene\nB. toward the bottom of the scene\nC. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. toward the right side of the scene" + } + ], + "images": [ + "images/task_00077_51_484_camera_top.jpg", + "images/task_00077_51_489_camera_top.jpg", + "images/task_00077_51_494_camera_top.jpg", + "images/task_00077_51_499_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00077", + "split": "eval", + "split_group_id": "task_00077__episode_000051", + "recording_id": null, + "episode_id": 51, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/task_00077_51_161_camera_top.jpg", + "images/task_00077_51_313_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00077", + "split": "eval", + "split_group_id": "task_00077__episode_000051", + "recording_id": null, + "episode_id": 51, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. transfer\nC. release\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. hold and carry" + } + ], + "images": [ + "images/task_00077_51_233_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00077", + "split": "eval", + "split_group_id": "task_00077__episode_000051", + "recording_id": null, + "episode_id": 51, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_00077_51_383_camera_top.jpg", + "images/task_00077_51_245_camera_top.jpg", + "images/task_00077_51_194_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00077", + "split": "eval", + "split_group_id": "task_00077__episode_000051", + "recording_id": null, + "episode_id": 51, + "camera": "camera_top", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. 
Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00077_51_487_camera_top_t-6.jpg", + "images/task_00077_51_490_camera_top_t-3.jpg", + "images/task_00077_51_493_camera_top_t0.jpg", + "images/task_00077_51_496_camera_top_t+3.jpg", + "images/task_00077_51_499_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00077", + "split": "eval", + "split_group_id": "task_00077__episode_000051", + "recording_id": null, + "episode_id": 51, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_00077_51_218_camera_top.jpg", + "images/task_00077_51_334_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00077", + "split": "eval", + "split_group_id": "task_00077__episode_000051", + "recording_id": null, + "episode_id": 51, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the right side of the scene\nA. toward the top of the scene\nC. toward the bottom of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "B. toward the right side of the scene" + } + ], + "images": [ + "images/task_00077_51_486_camera_top.jpg", + "images/task_00077_51_491_camera_top.jpg", + "images/task_00077_51_496_camera_top.jpg", + "images/task_00077_51_501_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00077", + "split": "eval", + "split_group_id": "task_00077__episode_000051", + "recording_id": null, + "episode_id": 51, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. transfer\nC. approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. transfer" + } + ], + "images": [ + "images/task_00077_58_418_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00077", + "split": "eval", + "split_group_id": "task_00077__episode_000058", + "recording_id": null, + "episode_id": 58, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. release\nC. approach\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. release" + } + ], + "images": [ + "images/task_00077_84_650_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00077", + "split": "eval", + "split_group_id": "task_00077__episode_000084", + "recording_id": null, + "episode_id": 84, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. contact\nC. 
release\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. contact" + } + ], + "images": [ + "images/task_00077_135_98_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00077", + "split": "eval", + "split_group_id": "task_00077__episode_000135", + "recording_id": null, + "episode_id": 135, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00078_29_141_camera_top_t-6.jpg", + "images/task_00078_29_144_camera_top_t-3.jpg", + "images/task_00078_29_147_camera_top_t0.jpg", + "images/task_00078_29_150_camera_top_t+3.jpg", + "images/task_00078_29_153_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00078", + "split": "eval", + "split_group_id": "task_00078__episode_000029", + "recording_id": null, + "episode_id": 29, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00078_29_188_camera_top.jpg", + "images/task_00078_29_191_camera_top.jpg", + "images/task_00078_29_194_camera_top.jpg", + "images/task_00078_29_197_camera_top.jpg", + "images/task_00078_29_200_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00078", + "split": "eval", + "split_group_id": "task_00078__episode_000029", + "recording_id": null, + "episode_id": 29, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the right side of the scene\nA. toward the top of the scene\nC. toward the left side of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. toward the right side of the scene" + } + ], + "images": [ + "images/task_00078_29_280_camera_top.jpg", + "images/task_00078_29_285_camera_top.jpg", + "images/task_00078_29_290_camera_top.jpg", + "images/task_00078_29_295_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00078", + "split": "eval", + "split_group_id": "task_00078__episode_000029", + "recording_id": null, + "episode_id": 29, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/task_00078_29_140_camera_top.jpg", + "images/task_00078_29_239_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00078", + "split": "eval", + "split_group_id": "task_00078__episode_000029", + "recording_id": null, + "episode_id": 29, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. contact\nC. approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. approach" + } + ], + "images": [ + "images/task_00078_29_218_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00078", + "split": "eval", + "split_group_id": "task_00078__episode_000029", + "recording_id": null, + "episode_id": 29, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00078_29_141_camera_top.jpg", + "images/task_00078_29_144_camera_top.jpg", + "images/task_00078_29_147_camera_top.jpg", + "images/task_00078_29_150_camera_top.jpg", + "images/task_00078_29_153_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00078", + "split": "eval", + "split_group_id": "task_00078__episode_000029", + "recording_id": null, + "episode_id": 29, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_00078_29_329_camera_top.jpg", + "images/task_00078_29_120_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00078", + "split": "eval", + "split_group_id": "task_00078__episode_000029", + "recording_id": null, + "episode_id": 29, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. approach\nC. contact\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
transfer" + } + ], + "images": [ + "images/task_00078_94_175_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00078", + "split": "eval", + "split_group_id": "task_00078__episode_000094", + "recording_id": null, + "episode_id": 94, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. transfer\nC. approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. release" + } + ], + "images": [ + "images/task_00078_101_196_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00078", + "split": "eval", + "split_group_id": "task_00078__episode_000101", + "recording_id": null, + "episode_id": 101, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_00078_116_204_camera_top.jpg", + "images/task_00078_116_104_camera_top.jpg", + "images/task_00078_116_171_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00078", + "split": "eval", + "split_group_id": "task_00078__episode_000116", + "recording_id": null, + "episode_id": 116, + "camera": "camera_top", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nA. toward the left side of the scene\nB. toward the top of the scene\nC. toward the right side of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "A. toward the left side of the scene" + } + ], + "images": [ + "images/task_00078_116_166_camera_top.jpg", + "images/task_00078_116_171_camera_top.jpg", + "images/task_00078_116_176_camera_top.jpg", + "images/task_00078_116_181_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00078", + "split": "eval", + "split_group_id": "task_00078__episode_000116", + "recording_id": null, + "episode_id": 116, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/task_00078_116_207_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00078", + "split": "eval", + "split_group_id": "task_00078__episode_000116", + "recording_id": null, + "episode_id": 116, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_00078_116_191_camera_top.jpg", + "images/task_00078_116_135_camera_top.jpg", + "images/task_00078_116_275_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00078", + "split": "eval", + "split_group_id": "task_00078__episode_000116", + "recording_id": null, + "episode_id": 116, + "camera": "camera_top", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the left side of the scene\nB. toward the bottom of the scene\nC. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. toward the right side of the scene" + } + ], + "images": [ + "images/task_00078_116_238_camera_top.jpg", + "images/task_00078_116_243_camera_top.jpg", + "images/task_00078_116_248_camera_top.jpg", + "images/task_00078_116_253_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00078", + "split": "eval", + "split_group_id": "task_00078__episode_000116", + "recording_id": null, + "episode_id": 116, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_00078_116_107_camera_top.jpg", + "images/task_00078_116_215_camera_top.jpg", + "images/task_00078_116_164_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00078", + "split": "eval", + "split_group_id": "task_00078__episode_000116", + "recording_id": null, + "episode_id": 116, + "camera": "camera_top", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00078_116_222_camera_top_t-6.jpg", + "images/task_00078_116_225_camera_top_t-3.jpg", + "images/task_00078_116_228_camera_top_t0.jpg", + "images/task_00078_116_231_camera_top_t+3.jpg", + "images/task_00078_116_234_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00078", + "split": "eval", + "split_group_id": "task_00078__episode_000116", + "recording_id": null, + "episode_id": 116, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nC. toward the left side of the scene\nA. toward the bottom of the scene\nB. toward the right side of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
toward the left side of the scene" + } + ], + "images": [ + "images/task_00078_116_161_camera_top.jpg", + "images/task_00078_116_166_camera_top.jpg", + "images/task_00078_116_171_camera_top.jpg", + "images/task_00078_116_176_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00078", + "split": "eval", + "split_group_id": "task_00078__episode_000116", + "recording_id": null, + "episode_id": 116, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. approach\nC. pre-approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. pre-approach" + } + ], + "images": [ + "images/task_00078_116_73_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00078", + "split": "eval", + "split_group_id": "task_00078__episode_000116", + "recording_id": null, + "episode_id": 116, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/task_00078_116_208_camera_top.jpg", + "images/task_00078_116_133_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00078", + "split": "eval", + "split_group_id": "task_00078__episode_000116", + "recording_id": null, + "episode_id": 116, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. contact\nC. release\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. hold and carry" + } + ], + "images": [ + "images/task_00078_134_117_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00078", + "split": "eval", + "split_group_id": "task_00078__episode_000134", + "recording_id": null, + "episode_id": 134, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_00078_145_243_camera_top.jpg", + "images/task_00078_145_142_camera_top.jpg", + "images/task_00078_145_179_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00078", + "split": "eval", + "split_group_id": "task_00078__episode_000145", + "recording_id": null, + "episode_id": 145, + "camera": "camera_top", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_00078_145_136_camera_top.jpg", + "images/task_00078_145_417_camera_top.jpg", + "images/task_00078_145_251_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00078", + "split": "eval", + "split_group_id": "task_00078__episode_000145", + "recording_id": null, + "episode_id": 145, + "camera": "camera_top", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00078_145_198_camera_top.jpg", + "images/task_00078_145_201_camera_top.jpg", + "images/task_00078_145_204_camera_top.jpg", + "images/task_00078_145_207_camera_top.jpg", + "images/task_00078_145_210_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00078", + "split": "eval", + "split_group_id": "task_00078__episode_000145", + "recording_id": null, + "episode_id": 145, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. release\nC. approach\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
pre-approach" + } + ], + "images": [ + "images/task_00078_145_104_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00078", + "split": "eval", + "split_group_id": "task_00078__episode_000145", + "recording_id": null, + "episode_id": 145, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_00078_145_246_camera_top.jpg", + "images/task_00078_145_174_camera_top.jpg", + "images/task_00078_145_118_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00078", + "split": "eval", + "split_group_id": "task_00078__episode_000145", + "recording_id": null, + "episode_id": 145, + "camera": "camera_top", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the top of the scene\nB. toward the left side of the scene\nC. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00078_145_248_camera_top.jpg", + "images/task_00078_145_253_camera_top.jpg", + "images/task_00078_145_258_camera_top.jpg", + "images/task_00078_145_263_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00078", + "split": "eval", + "split_group_id": "task_00078__episode_000145", + "recording_id": null, + "episode_id": 145, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. contact\nC. hold and carry\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. hold and carry" + } + ], + "images": [ + "images/task_00078_145_180_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00078", + "split": "eval", + "split_group_id": "task_00078__episode_000145", + "recording_id": null, + "episode_id": 145, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nA. toward the left side of the scene\nB. toward the right side of the scene\nC. toward the top of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
toward the left side of the scene" + } + ], + "images": [ + "images/task_00078_145_108_camera_top.jpg", + "images/task_00078_145_113_camera_top.jpg", + "images/task_00078_145_118_camera_top.jpg", + "images/task_00078_145_123_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00078", + "split": "eval", + "split_group_id": "task_00078__episode_000145", + "recording_id": null, + "episode_id": 145, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. pre-approach\nC. approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. pre-approach" + } + ], + "images": [ + "images/task_00078_156_525_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00078", + "split": "eval", + "split_group_id": "task_00078__episode_000156", + "recording_id": null, + "episode_id": 156, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. approach\nC. hold and carry\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. hold and carry" + } + ], + "images": [ + "images/task_00079_6_1198_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00079", + "split": "eval", + "split_group_id": "task_00079__episode_000006", + "recording_id": null, + "episode_id": 6, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00079_9_772_camera_top.jpg", + "images/task_00079_9_775_camera_top.jpg", + "images/task_00079_9_778_camera_top.jpg", + "images/task_00079_9_781_camera_top.jpg", + "images/task_00079_9_784_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00079", + "split": "eval", + "split_group_id": "task_00079__episode_000009", + "recording_id": null, + "episode_id": 9, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nC. toward the bottom of the scene\nA. toward the right side of the scene\nB. toward the top of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. toward the bottom of the scene" + } + ], + "images": [ + "images/task_00079_15_1604_camera_top.jpg", + "images/task_00079_15_1609_camera_top.jpg", + "images/task_00079_15_1614_camera_top.jpg", + "images/task_00079_15_1619_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00079", + "split": "eval", + "split_group_id": "task_00079__episode_000015", + "recording_id": null, + "episode_id": 15, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the top of the scene\nB. toward the left side of the scene\nC. 
toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. toward the right side of the scene" + } + ], + "images": [ + "images/task_00079_94_776_camera_top.jpg", + "images/task_00079_94_781_camera_top.jpg", + "images/task_00079_94_786_camera_top.jpg", + "images/task_00079_94_791_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00079", + "split": "eval", + "split_group_id": "task_00079__episode_000094", + "recording_id": null, + "episode_id": 94, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the right side of the scene\nA. toward the top of the scene\nC. toward the bottom of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. toward the right side of the scene" + } + ], + "images": [ + "images/task_00079_106_1592_camera_top.jpg", + "images/task_00079_106_1597_camera_top.jpg", + "images/task_00079_106_1602_camera_top.jpg", + "images/task_00079_106_1607_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00079", + "split": "eval", + "split_group_id": "task_00079__episode_000106", + "recording_id": null, + "episode_id": 106, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the left side of the scene\nA. toward the bottom of the scene\nB. toward the top of the scene\nC. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
toward the left side of the scene" + } + ], + "images": [ + "images/task_00080_0_364_camera_top.jpg", + "images/task_00080_0_369_camera_top.jpg", + "images/task_00080_0_374_camera_top.jpg", + "images/task_00080_0_379_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00080", + "split": "eval", + "split_group_id": "task_00080__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. approach\nC. hold and carry\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. hold and carry" + } + ], + "images": [ + "images/task_00080_5_212_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00080", + "split": "eval", + "split_group_id": "task_00080__episode_000005", + "recording_id": null, + "episode_id": 5, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the left side of the scene\nB. toward the bottom of the scene\nC. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00080_38_310_camera_top.jpg", + "images/task_00080_38_315_camera_top.jpg", + "images/task_00080_38_320_camera_top.jpg", + "images/task_00080_38_325_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00080", + "split": "eval", + "split_group_id": "task_00080__episode_000038", + "recording_id": null, + "episode_id": 38, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_00080_99_189_camera_top.jpg", + "images/task_00080_99_330_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00080", + "split": "eval", + "split_group_id": "task_00080__episode_000099", + "recording_id": null, + "episode_id": 99, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/task_00080_99_274_camera_top.jpg", + "images/task_00080_99_172_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00080", + "split": "eval", + "split_group_id": "task_00080__episode_000099", + "recording_id": null, + "episode_id": 99, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_00080_99_285_camera_top.jpg", + "images/task_00080_99_357_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00080", + "split": "eval", + "split_group_id": "task_00080__episode_000099", + "recording_id": null, + "episode_id": 99, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. pre-approach\nC. hold and carry\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
approach" + } + ], + "images": [ + "images/task_00080_102_195_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00080", + "split": "eval", + "split_group_id": "task_00080__episode_000102", + "recording_id": null, + "episode_id": 102, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00080_112_194_camera_top.jpg", + "images/task_00080_112_197_camera_top.jpg", + "images/task_00080_112_200_camera_top.jpg", + "images/task_00080_112_203_camera_top.jpg", + "images/task_00080_112_206_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00080", + "split": "eval", + "split_group_id": "task_00080__episode_000112", + "recording_id": null, + "episode_id": 112, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the left side of the scene\nA. toward the right side of the scene\nC. toward the bottom of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
toward the left side of the scene" + } + ], + "images": [ + "images/task_00080_114_339_camera_top.jpg", + "images/task_00080_114_344_camera_top.jpg", + "images/task_00080_114_349_camera_top.jpg", + "images/task_00080_114_354_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00080", + "split": "eval", + "split_group_id": "task_00080__episode_000114", + "recording_id": null, + "episode_id": 114, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00080_117_277_camera_top.jpg", + "images/task_00080_117_280_camera_top.jpg", + "images/task_00080_117_283_camera_top.jpg", + "images/task_00080_117_286_camera_top.jpg", + "images/task_00080_117_289_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00080", + "split": "eval", + "split_group_id": "task_00080__episode_000117", + "recording_id": null, + "episode_id": 117, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nC. toward the left side of the scene\nA. toward the bottom of the scene\nB. toward the top of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
toward the left side of the scene" + } + ], + "images": [ + "images/task_00081_8_728_camera_top.jpg", + "images/task_00081_8_733_camera_top.jpg", + "images/task_00081_8_738_camera_top.jpg", + "images/task_00081_8_743_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00081", + "split": "eval", + "split_group_id": "task_00081__episode_000008", + "recording_id": null, + "episode_id": 8, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the left side of the scene\nB. toward the top of the scene\nC. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. toward the right side of the scene" + } + ], + "images": [ + "images/task_00081_35_1687_camera_top.jpg", + "images/task_00081_35_1692_camera_top.jpg", + "images/task_00081_35_1697_camera_top.jpg", + "images/task_00081_35_1702_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00081", + "split": "eval", + "split_group_id": "task_00081__episode_000035", + "recording_id": null, + "episode_id": 35, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the right side of the scene\nA. toward the bottom of the scene\nC. toward the top of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00081_47_136_camera_top.jpg", + "images/task_00081_47_141_camera_top.jpg", + "images/task_00081_47_146_camera_top.jpg", + "images/task_00081_47_151_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00081", + "split": "eval", + "split_group_id": "task_00081__episode_000047", + "recording_id": null, + "episode_id": 47, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the bottom of the scene\nB. toward the left side of the scene\nC. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. toward the right side of the scene" + } + ], + "images": [ + "images/task_00081_70_1505_camera_top.jpg", + "images/task_00081_70_1510_camera_top.jpg", + "images/task_00081_70_1515_camera_top.jpg", + "images/task_00081_70_1520_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00081", + "split": "eval", + "split_group_id": "task_00081__episode_000070", + "recording_id": null, + "episode_id": 70, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. approach\nC. transfer\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
release" + } + ], + "images": [ + "images/task_00081_72_1494_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00081", + "split": "eval", + "split_group_id": "task_00081__episode_000072", + "recording_id": null, + "episode_id": 72, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the right side of the scene\nA. toward the top of the scene\nC. toward the left side of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. toward the right side of the scene" + } + ], + "images": [ + "images/task_00081_76_1374_camera_top.jpg", + "images/task_00081_76_1379_camera_top.jpg", + "images/task_00081_76_1384_camera_top.jpg", + "images/task_00081_76_1389_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00081", + "split": "eval", + "split_group_id": "task_00081__episode_000076", + "recording_id": null, + "episode_id": 76, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. pre-approach\nC. transfer\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
pre-approach" + } + ], + "images": [ + "images/task_00081_91_665_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00081", + "split": "eval", + "split_group_id": "task_00081__episode_000091", + "recording_id": null, + "episode_id": 91, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00081_91_1073_camera_top.jpg", + "images/task_00081_91_1076_camera_top.jpg", + "images/task_00081_91_1079_camera_top.jpg", + "images/task_00081_91_1082_camera_top.jpg", + "images/task_00081_91_1085_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00081", + "split": "eval", + "split_group_id": "task_00081__episode_000091", + "recording_id": null, + "episode_id": 91, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the right side of the scene\nA. toward the left side of the scene\nC. toward the bottom of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00081_91_1903_camera_top.jpg", + "images/task_00081_91_1908_camera_top.jpg", + "images/task_00081_91_1913_camera_top.jpg", + "images/task_00081_91_1918_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00081", + "split": "eval", + "split_group_id": "task_00081__episode_000091", + "recording_id": null, + "episode_id": 91, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_00081_91_1237_camera_top.jpg", + "images/task_00081_91_1468_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00081", + "split": "eval", + "split_group_id": "task_00081__episode_000091", + "recording_id": null, + "episode_id": 91, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00081_91_870_camera_top.jpg", + "images/task_00081_91_873_camera_top.jpg", + "images/task_00081_91_876_camera_top.jpg", + "images/task_00081_91_879_camera_top.jpg", + "images/task_00081_91_882_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00081", + "split": "eval", + "split_group_id": "task_00081__episode_000091", + "recording_id": null, + "episode_id": 91, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00081_91_1912_camera_top.jpg", + "images/task_00081_91_1915_camera_top.jpg", + "images/task_00081_91_1918_camera_top.jpg", + "images/task_00081_91_1921_camera_top.jpg", + "images/task_00081_91_1924_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00081", + "split": "eval", + "split_group_id": "task_00081__episode_000091", + "recording_id": null, + "episode_id": 91, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00081_91_1679_camera_top.jpg", + "images/task_00081_91_1682_camera_top.jpg", + "images/task_00081_91_1685_camera_top.jpg", + "images/task_00081_91_1688_camera_top.jpg", + "images/task_00081_91_1691_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00081", + "split": "eval", + "split_group_id": "task_00081__episode_000091", + "recording_id": null, + "episode_id": 91, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00081_91_1669_camera_top.jpg", + "images/task_00081_91_1672_camera_top.jpg", + "images/task_00081_91_1675_camera_top.jpg", + "images/task_00081_91_1678_camera_top.jpg", + "images/task_00081_91_1681_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00081", + "split": "eval", + "split_group_id": "task_00081__episode_000091", + "recording_id": null, + "episode_id": 91, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. 
Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_00081_94_1065_camera_top.jpg", + "images/task_00081_94_1187_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00081", + "split": "eval", + "split_group_id": "task_00081__episode_000094", + "recording_id": null, + "episode_id": 94, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_00081_94_1271_camera_top.jpg", + "images/task_00081_94_1063_camera_top.jpg", + "images/task_00081_94_1016_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00081", + "split": "eval", + "split_group_id": "task_00081__episode_000094", + "recording_id": null, + "episode_id": 94, + "camera": "camera_top", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00081_94_1119_camera_top_t-6.jpg", + "images/task_00081_94_1122_camera_top_t-3.jpg", + "images/task_00081_94_1125_camera_top_t0.jpg", + "images/task_00081_94_1128_camera_top_t+3.jpg", + "images/task_00081_94_1131_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00081", + "split": "eval", + "split_group_id": "task_00081__episode_000094", + "recording_id": null, + "episode_id": 94, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_00081_94_1199_camera_top.jpg", + "images/task_00081_94_1058_camera_top.jpg", + "images/task_00081_94_1005_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00081", + "split": "eval", + "split_group_id": "task_00081__episode_000094", + "recording_id": null, + "episode_id": 94, + "camera": "camera_top", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. release\nC. pre-approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
hold and carry" + } + ], + "images": [ + "images/task_00081_94_1037_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00081", + "split": "eval", + "split_group_id": "task_00081__episode_000094", + "recording_id": null, + "episode_id": 94, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00081_94_1034_camera_top.jpg", + "images/task_00081_94_1037_camera_top.jpg", + "images/task_00081_94_1040_camera_top.jpg", + "images/task_00081_94_1043_camera_top.jpg", + "images/task_00081_94_1046_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00081", + "split": "eval", + "split_group_id": "task_00081__episode_000094", + "recording_id": null, + "episode_id": 94, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_00081_94_1144_camera_top.jpg", + "images/task_00081_94_1012_camera_top.jpg", + "images/task_00081_94_1050_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00081", + "split": "eval", + "split_group_id": "task_00081__episode_000094", + "recording_id": null, + "episode_id": 94, + "camera": "camera_top", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_00081_114_1190_camera_top.jpg", + "images/task_00081_114_834_camera_top.jpg", + "images/task_00081_114_957_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00081", + "split": "eval", + "split_group_id": "task_00081__episode_000114", + "recording_id": null, + "episode_id": 114, + "camera": "camera_top", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_00082_27_902_camera_top.jpg", + "images/task_00082_27_232_camera_top.jpg", + "images/task_00082_27_456_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00082", + "split": "eval", + "split_group_id": "task_00082__episode_000027", + "recording_id": null, + "episode_id": 27, + "camera": "camera_top", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. transfer\nC. contact\nD. 
hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. hold and carry" + } + ], + "images": [ + "images/task_00082_69_604_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00082", + "split": "eval", + "split_group_id": "task_00082__episode_000069", + "recording_id": null, + "episode_id": 69, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00083_7_496_camera_top.jpg", + "images/task_00083_7_499_camera_top.jpg", + "images/task_00083_7_502_camera_top.jpg", + "images/task_00083_7_505_camera_top.jpg", + "images/task_00083_7_508_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00083", + "split": "eval", + "split_group_id": "task_00083__episode_000007", + "recording_id": null, + "episode_id": 7, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. release\nC. pre-approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
transfer" + } + ], + "images": [ + "images/task_00083_26_730_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00083", + "split": "eval", + "split_group_id": "task_00083__episode_000026", + "recording_id": null, + "episode_id": 26, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. both arms are active" + } + ], + "images": [ + "images/task_00083_39_1207_camera_top.jpg", + "images/task_00083_39_1210_camera_top.jpg", + "images/task_00083_39_1213_camera_top.jpg", + "images/task_00083_39_1216_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00083", + "split": "eval", + "split_group_id": "task_00083__episode_000039", + "recording_id": null, + "episode_id": 39, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
both arms are idle" + } + ], + "images": [ + "images/task_00083_39_1372_camera_top.jpg", + "images/task_00083_39_1375_camera_top.jpg", + "images/task_00083_39_1378_camera_top.jpg", + "images/task_00083_39_1381_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00083", + "split": "eval", + "split_group_id": "task_00083__episode_000039", + "recording_id": null, + "episode_id": 39, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. both arms are idle" + } + ], + "images": [ + "images/task_00083_39_508_camera_top.jpg", + "images/task_00083_39_511_camera_top.jpg", + "images/task_00083_39_514_camera_top.jpg", + "images/task_00083_39_517_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00083", + "split": "eval", + "split_group_id": "task_00083__episode_000039", + "recording_id": null, + "episode_id": 39, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00083_39_862_camera_top.jpg", + "images/task_00083_39_865_camera_top.jpg", + "images/task_00083_39_868_camera_top.jpg", + "images/task_00083_39_871_camera_top.jpg", + "images/task_00083_39_874_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00083", + "split": "eval", + "split_group_id": "task_00083__episode_000039", + "recording_id": null, + "episode_id": 39, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nC. toward the left side of the scene\nA. toward the right side of the scene\nB. toward the top of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. toward the left side of the scene" + } + ], + "images": [ + "images/task_00083_39_908_camera_top.jpg", + "images/task_00083_39_913_camera_top.jpg", + "images/task_00083_39_918_camera_top.jpg", + "images/task_00083_39_923_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00083", + "split": "eval", + "split_group_id": "task_00083__episode_000039", + "recording_id": null, + "episode_id": 39, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
both arms are active" + } + ], + "images": [ + "images/task_00083_39_1206_camera_top.jpg", + "images/task_00083_39_1209_camera_top.jpg", + "images/task_00083_39_1212_camera_top.jpg", + "images/task_00083_39_1215_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00083", + "split": "eval", + "split_group_id": "task_00083__episode_000039", + "recording_id": null, + "episode_id": 39, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. both arms are active" + } + ], + "images": [ + "images/task_00083_39_1208_camera_top.jpg", + "images/task_00083_39_1211_camera_top.jpg", + "images/task_00083_39_1214_camera_top.jpg", + "images/task_00083_39_1217_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00083", + "split": "eval", + "split_group_id": "task_00083__episode_000039", + "recording_id": null, + "episode_id": 39, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. transfer\nC. approach\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
hold and carry" + } + ], + "images": [ + "images/task_00083_39_745_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00083", + "split": "eval", + "split_group_id": "task_00083__episode_000039", + "recording_id": null, + "episode_id": 39, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_00083_39_986_camera_top.jpg", + "images/task_00083_39_310_camera_top.jpg", + "images/task_00083_39_1107_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00083", + "split": "eval", + "split_group_id": "task_00083__episode_000039", + "recording_id": null, + "episode_id": 39, + "camera": "camera_top", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. hold and carry\nC. contact\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. hold and carry" + } + ], + "images": [ + "images/task_00084_17_289_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00084", + "split": "eval", + "split_group_id": "task_00084__episode_000017", + "recording_id": null, + "episode_id": 17, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the right side of the scene\nA. toward the top of the scene\nC. toward the left side of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "B. toward the right side of the scene" + } + ], + "images": [ + "images/task_00084_108_1229_camera_top.jpg", + "images/task_00084_108_1234_camera_top.jpg", + "images/task_00084_108_1239_camera_top.jpg", + "images/task_00084_108_1244_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00084", + "split": "eval", + "split_group_id": "task_00084__episode_000108", + "recording_id": null, + "episode_id": 108, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_00084_108_1123_camera_top.jpg", + "images/task_00084_108_1236_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00084", + "split": "eval", + "split_group_id": "task_00084__episode_000108", + "recording_id": null, + "episode_id": 108, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00084_108_430_camera_top.jpg", + "images/task_00084_108_433_camera_top.jpg", + "images/task_00084_108_436_camera_top.jpg", + "images/task_00084_108_439_camera_top.jpg", + "images/task_00084_108_442_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00084", + "split": "eval", + "split_group_id": "task_00084__episode_000108", + "recording_id": null, + "episode_id": 108, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00084_108_1025_camera_top.jpg", + "images/task_00084_108_1028_camera_top.jpg", + "images/task_00084_108_1031_camera_top.jpg", + "images/task_00084_108_1034_camera_top.jpg", + "images/task_00084_108_1037_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00084", + "split": "eval", + "split_group_id": "task_00084__episode_000108", + "recording_id": null, + "episode_id": 108, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nA. toward the right side of the scene\nB. toward the bottom of the scene\nC. toward the top of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "A. toward the right side of the scene" + } + ], + "images": [ + "images/task_00084_108_1234_camera_top.jpg", + "images/task_00084_108_1239_camera_top.jpg", + "images/task_00084_108_1244_camera_top.jpg", + "images/task_00084_108_1249_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00084", + "split": "eval", + "split_group_id": "task_00084__episode_000108", + "recording_id": null, + "episode_id": 108, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_00084_108_365_camera_top.jpg", + "images/task_00084_108_133_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00084", + "split": "eval", + "split_group_id": "task_00084__episode_000108", + "recording_id": null, + "episode_id": 108, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00084_108_1064_camera_top.jpg", + "images/task_00084_108_1067_camera_top.jpg", + "images/task_00084_108_1070_camera_top.jpg", + "images/task_00084_108_1073_camera_top.jpg", + "images/task_00084_108_1076_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00084", + "split": "eval", + "split_group_id": "task_00084__episode_000108", + "recording_id": null, + "episode_id": 108, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_00084_108_128_camera_top.jpg", + "images/task_00084_108_454_camera_top.jpg", + "images/task_00084_108_1200_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00084", + "split": "eval", + "split_group_id": "task_00084__episode_000108", + "recording_id": null, + "episode_id": 108, + "camera": "camera_top", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_00084_130_228_camera_top.jpg", + "images/task_00084_130_1092_camera_top.jpg", + "images/task_00084_130_1487_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00084", + "split": "eval", + "split_group_id": "task_00084__episode_000130", + "recording_id": null, + "episode_id": 130, + "camera": "camera_top", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_00084_130_1038_camera_top.jpg", + "images/task_00084_130_1235_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00084", + "split": "eval", + "split_group_id": "task_00084__episode_000130", + "recording_id": null, + "episode_id": 130, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00084_130_280_camera_top.jpg", + "images/task_00084_130_283_camera_top.jpg", + "images/task_00084_130_286_camera_top.jpg", + "images/task_00084_130_289_camera_top.jpg", + "images/task_00084_130_292_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00084", + "split": "eval", + "split_group_id": "task_00084__episode_000130", + "recording_id": null, + "episode_id": 130, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the left side of the scene\nA. toward the right side of the scene\nB. toward the top of the scene\nC. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. toward the left side of the scene" + } + ], + "images": [ + "images/task_00084_130_845_camera_top.jpg", + "images/task_00084_130_850_camera_top.jpg", + "images/task_00084_130_855_camera_top.jpg", + "images/task_00084_130_860_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00084", + "split": "eval", + "split_group_id": "task_00084__episode_000130", + "recording_id": null, + "episode_id": 130, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/task_00084_130_590_camera_top.jpg", + "images/task_00084_130_650_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00084", + "split": "eval", + "split_group_id": "task_00084__episode_000130", + "recording_id": null, + "episode_id": 130, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. hold and carry\nC. pre-approach\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. hold and carry" + } + ], + "images": [ + "images/task_00084_130_286_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00084", + "split": "eval", + "split_group_id": "task_00084__episode_000130", + "recording_id": null, + "episode_id": 130, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/task_00084_130_269_camera_top.jpg", + "images/task_00084_130_206_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00084", + "split": "eval", + "split_group_id": "task_00084__episode_000130", + "recording_id": null, + "episode_id": 130, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00084_130_591_camera_top.jpg", + "images/task_00084_130_594_camera_top.jpg", + "images/task_00084_130_597_camera_top.jpg", + "images/task_00084_130_600_camera_top.jpg", + "images/task_00084_130_603_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00084", + "split": "eval", + "split_group_id": "task_00084__episode_000130", + "recording_id": null, + "episode_id": 130, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. approach\nC. release\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. release" + } + ], + "images": [ + "images/task_00084_142_2026_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00084", + "split": "eval", + "split_group_id": "task_00084__episode_000142", + "recording_id": null, + "episode_id": 142, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nC. toward the right side of the scene\nA. toward the left side of the scene\nB. toward the top of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. toward the right side of the scene" + } + ], + "images": [ + "images/task_00084_165_867_camera_top.jpg", + "images/task_00084_165_872_camera_top.jpg", + "images/task_00084_165_877_camera_top.jpg", + "images/task_00084_165_882_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00084", + "split": "eval", + "split_group_id": "task_00084__episode_000165", + "recording_id": null, + "episode_id": 165, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nA. toward the right side of the scene\nB. toward the bottom of the scene\nC. toward the left side of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00084_204_862_camera_top.jpg", + "images/task_00084_204_867_camera_top.jpg", + "images/task_00084_204_872_camera_top.jpg", + "images/task_00084_204_877_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00084", + "split": "eval", + "split_group_id": "task_00084__episode_000204", + "recording_id": null, + "episode_id": 204, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the top of the scene\nB. toward the left side of the scene\nC. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. toward the right side of the scene" + } + ], + "images": [ + "images/task_00084_279_312_camera_top.jpg", + "images/task_00084_279_317_camera_top.jpg", + "images/task_00084_279_322_camera_top.jpg", + "images/task_00084_279_327_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00084", + "split": "eval", + "split_group_id": "task_00084__episode_000279", + "recording_id": null, + "episode_id": 279, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the left side of the scene\nA. toward the right side of the scene\nC. toward the bottom of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
toward the left side of the scene" + } + ], + "images": [ + "images/task_00085_8_157_camera_top.jpg", + "images/task_00085_8_162_camera_top.jpg", + "images/task_00085_8_167_camera_top.jpg", + "images/task_00085_8_172_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00085", + "split": "eval", + "split_group_id": "task_00085__episode_000008", + "recording_id": null, + "episode_id": 8, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. hold and carry\nC. release\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. transfer" + } + ], + "images": [ + "images/task_00085_13_444_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00085", + "split": "eval", + "split_group_id": "task_00085__episode_000013", + "recording_id": null, + "episode_id": 13, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00085_24_240_camera_top.jpg", + "images/task_00085_24_243_camera_top.jpg", + "images/task_00085_24_246_camera_top.jpg", + "images/task_00085_24_249_camera_top.jpg", + "images/task_00085_24_252_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00085", + "split": "eval", + "split_group_id": "task_00085__episode_000024", + "recording_id": null, + "episode_id": 24, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_00085_43_327_camera_top.jpg", + "images/task_00085_43_153_camera_top.jpg", + "images/task_00085_43_189_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00085", + "split": "eval", + "split_group_id": "task_00085__episode_000043", + "recording_id": null, + "episode_id": 43, + "camera": "camera_top", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. release\nC. transfer\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
release" + } + ], + "images": [ + "images/task_00085_59_376_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00085", + "split": "eval", + "split_group_id": "task_00085__episode_000059", + "recording_id": null, + "episode_id": 59, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00085_83_192_camera_top.jpg", + "images/task_00085_83_195_camera_top.jpg", + "images/task_00085_83_198_camera_top.jpg", + "images/task_00085_83_201_camera_top.jpg", + "images/task_00085_83_204_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00085", + "split": "eval", + "split_group_id": "task_00085__episode_000083", + "recording_id": null, + "episode_id": 83, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nA. toward the bottom of the scene\nB. toward the right side of the scene\nC. toward the top of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
toward the bottom of the scene" + } + ], + "images": [ + "images/task_00085_91_159_camera_top.jpg", + "images/task_00085_91_164_camera_top.jpg", + "images/task_00085_91_169_camera_top.jpg", + "images/task_00085_91_174_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00085", + "split": "eval", + "split_group_id": "task_00085__episode_000091", + "recording_id": null, + "episode_id": 91, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nD. toward the bottom of the scene\nA. toward the top of the scene\nB. toward the left side of the scene\nC. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. toward the bottom of the scene" + } + ], + "images": [ + "images/task_00085_123_216_camera_top.jpg", + "images/task_00085_123_221_camera_top.jpg", + "images/task_00085_123_226_camera_top.jpg", + "images/task_00085_123_231_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00085", + "split": "eval", + "split_group_id": "task_00085__episode_000123", + "recording_id": null, + "episode_id": 123, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. release\nC. transfer\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
pre-approach" + } + ], + "images": [ + "images/task_00086_7_277_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00086", + "split": "eval", + "split_group_id": "task_00086__episode_000007", + "recording_id": null, + "episode_id": 7, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. release\nC. hold and carry\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. pre-approach" + } + ], + "images": [ + "images/task_00086_13_332_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00086", + "split": "eval", + "split_group_id": "task_00086__episode_000013", + "recording_id": null, + "episode_id": 13, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nB. toward the bottom of the scene\nA. toward the right side of the scene\nC. toward the top of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
toward the bottom of the scene" + } + ], + "images": [ + "images/task_00086_21_118_camera_top.jpg", + "images/task_00086_21_123_camera_top.jpg", + "images/task_00086_21_128_camera_top.jpg", + "images/task_00086_21_133_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00086", + "split": "eval", + "split_group_id": "task_00086__episode_000021", + "recording_id": null, + "episode_id": 21, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the left gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/task_00086_21_178_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00086", + "split": "eval", + "split_group_id": "task_00086__episode_000021", + "recording_id": null, + "episode_id": 21, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/task_00086_21_122_camera_top.jpg", + "images/task_00086_21_329_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00086", + "split": "eval", + "split_group_id": "task_00086__episode_000021", + "recording_id": null, + "episode_id": 21, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the left gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/task_00086_21_193_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00086", + "split": "eval", + "split_group_id": "task_00086__episode_000021", + "recording_id": null, + "episode_id": 21, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00086_21_165_camera_top.jpg", + "images/task_00086_21_168_camera_top.jpg", + "images/task_00086_21_171_camera_top.jpg", + "images/task_00086_21_174_camera_top.jpg", + "images/task_00086_21_177_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00086", + "split": "eval", + "split_group_id": "task_00086__episode_000021", + "recording_id": null, + "episode_id": 21, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00086_21_215_camera_top.jpg", + "images/task_00086_21_218_camera_top.jpg", + "images/task_00086_21_221_camera_top.jpg", + "images/task_00086_21_224_camera_top.jpg", + "images/task_00086_21_227_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00086", + "split": "eval", + "split_group_id": "task_00086__episode_000021", + "recording_id": null, + "episode_id": 21, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00086_21_249_camera_top.jpg", + "images/task_00086_21_252_camera_top.jpg", + "images/task_00086_21_255_camera_top.jpg", + "images/task_00086_21_258_camera_top.jpg", + "images/task_00086_21_261_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00086", + "split": "eval", + "split_group_id": "task_00086__episode_000021", + "recording_id": null, + "episode_id": 21, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nA. toward the left side of the scene\nB. toward the top of the scene\nC. toward the bottom of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. toward the left side of the scene" + } + ], + "images": [ + "images/task_00086_21_264_camera_top.jpg", + "images/task_00086_21_269_camera_top.jpg", + "images/task_00086_21_274_camera_top.jpg", + "images/task_00086_21_279_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00086", + "split": "eval", + "split_group_id": "task_00086__episode_000021", + "recording_id": null, + "episode_id": 21, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nC. toward the right side of the scene\nA. toward the top of the scene\nB. toward the bottom of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00086_21_196_camera_top.jpg", + "images/task_00086_21_201_camera_top.jpg", + "images/task_00086_21_206_camera_top.jpg", + "images/task_00086_21_211_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00086", + "split": "eval", + "split_group_id": "task_00086__episode_000021", + "recording_id": null, + "episode_id": 21, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00086_21_281_camera_top_t-6.jpg", + "images/task_00086_21_284_camera_top_t-3.jpg", + "images/task_00086_21_287_camera_top_t0.jpg", + "images/task_00086_21_290_camera_top_t+3.jpg", + "images/task_00086_21_293_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00086", + "split": "eval", + "split_group_id": "task_00086__episode_000021", + "recording_id": null, + "episode_id": 21, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nA. toward the left side of the scene\nB. toward the top of the scene\nC. toward the bottom of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
toward the left side of the scene" + } + ], + "images": [ + "images/task_00086_21_263_camera_top.jpg", + "images/task_00086_21_268_camera_top.jpg", + "images/task_00086_21_273_camera_top.jpg", + "images/task_00086_21_278_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00086", + "split": "eval", + "split_group_id": "task_00086__episode_000021", + "recording_id": null, + "episode_id": 21, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. hold and carry\nC. transfer\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. pre-approach" + } + ], + "images": [ + "images/task_00086_44_360_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00086", + "split": "eval", + "split_group_id": "task_00086__episode_000044", + "recording_id": null, + "episode_id": 44, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nA. toward the right side of the scene\nB. toward the bottom of the scene\nC. toward the top of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00086_50_147_camera_top.jpg", + "images/task_00086_50_152_camera_top.jpg", + "images/task_00086_50_157_camera_top.jpg", + "images/task_00086_50_162_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00086", + "split": "eval", + "split_group_id": "task_00086__episode_000050", + "recording_id": null, + "episode_id": 50, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. contact\nC. pre-approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. transfer" + } + ], + "images": [ + "images/task_00086_50_183_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00086", + "split": "eval", + "split_group_id": "task_00086__episode_000050", + "recording_id": null, + "episode_id": 50, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. contact\nC. approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. hold and carry" + } + ], + "images": [ + "images/task_00086_50_125_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00086", + "split": "eval", + "split_group_id": "task_00086__episode_000050", + "recording_id": null, + "episode_id": 50, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. 
toward the left side of the scene\nB. toward the bottom of the scene\nC. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. toward the right side of the scene" + } + ], + "images": [ + "images/task_00086_50_151_camera_top.jpg", + "images/task_00086_50_156_camera_top.jpg", + "images/task_00086_50_161_camera_top.jpg", + "images/task_00086_50_166_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00086", + "split": "eval", + "split_group_id": "task_00086__episode_000050", + "recording_id": null, + "episode_id": 50, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00086_50_133_camera_top.jpg", + "images/task_00086_50_136_camera_top.jpg", + "images/task_00086_50_139_camera_top.jpg", + "images/task_00086_50_142_camera_top.jpg", + "images/task_00086_50_145_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00086", + "split": "eval", + "split_group_id": "task_00086__episode_000050", + "recording_id": null, + "episode_id": 50, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_00086_50_163_camera_top.jpg", + "images/task_00086_50_226_camera_top.jpg", + "images/task_00086_50_97_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00086", + "split": "eval", + "split_group_id": "task_00086__episode_000050", + "recording_id": null, + "episode_id": 50, + "camera": "camera_top", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00086_50_158_camera_top_t-6.jpg", + "images/task_00086_50_161_camera_top_t-3.jpg", + "images/task_00086_50_164_camera_top_t0.jpg", + "images/task_00086_50_167_camera_top_t+3.jpg", + "images/task_00086_50_170_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00086", + "split": "eval", + "split_group_id": "task_00086__episode_000050", + "recording_id": null, + "episode_id": 50, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00086_60_207_camera_top.jpg", + "images/task_00086_60_210_camera_top.jpg", + "images/task_00086_60_213_camera_top.jpg", + "images/task_00086_60_216_camera_top.jpg", + "images/task_00086_60_219_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00086", + "split": "eval", + "split_group_id": "task_00086__episode_000060", + "recording_id": null, + "episode_id": 60, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the left side of the scene\nB. toward the bottom of the scene\nC. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. toward the right side of the scene" + } + ], + "images": [ + "images/task_00086_60_203_camera_top.jpg", + "images/task_00086_60_208_camera_top.jpg", + "images/task_00086_60_213_camera_top.jpg", + "images/task_00086_60_218_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00086", + "split": "eval", + "split_group_id": "task_00086__episode_000060", + "recording_id": null, + "episode_id": 60, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00086_60_148_camera_top_t-6.jpg", + "images/task_00086_60_151_camera_top_t-3.jpg", + "images/task_00086_60_154_camera_top_t0.jpg", + "images/task_00086_60_157_camera_top_t+3.jpg", + "images/task_00086_60_160_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00086", + "split": "eval", + "split_group_id": "task_00086__episode_000060", + "recording_id": null, + "episode_id": 60, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_00086_60_104_camera_top.jpg", + "images/task_00086_60_237_camera_top.jpg", + "images/task_00086_60_147_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00086", + "split": "eval", + "split_group_id": "task_00086__episode_000060", + "recording_id": null, + "episode_id": 60, + "camera": "camera_top", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nC. toward the top of the scene\nA. toward the bottom of the scene\nB. toward the right side of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
toward the top of the scene" + } + ], + "images": [ + "images/task_00086_60_319_camera_top.jpg", + "images/task_00086_60_324_camera_top.jpg", + "images/task_00086_60_329_camera_top.jpg", + "images/task_00086_60_334_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00086", + "split": "eval", + "split_group_id": "task_00086__episode_000060", + "recording_id": null, + "episode_id": 60, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nA. toward the right side of the scene\nB. toward the bottom of the scene\nC. toward the top of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. toward the right side of the scene" + } + ], + "images": [ + "images/task_00086_60_207_camera_top.jpg", + "images/task_00086_60_212_camera_top.jpg", + "images/task_00086_60_217_camera_top.jpg", + "images/task_00086_60_222_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00086", + "split": "eval", + "split_group_id": "task_00086__episode_000060", + "recording_id": null, + "episode_id": 60, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/task_00086_60_237_camera_top.jpg", + "images/task_00086_60_130_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00086", + "split": "eval", + "split_group_id": "task_00086__episode_000060", + "recording_id": null, + "episode_id": 60, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. transfer\nC. pre-approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. transfer" + } + ], + "images": [ + "images/task_00086_60_239_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00086", + "split": "eval", + "split_group_id": "task_00086__episode_000060", + "recording_id": null, + "episode_id": 60, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. approach\nC. pre-approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. pre-approach" + } + ], + "images": [ + "images/task_00086_94_378_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00086", + "split": "eval", + "split_group_id": "task_00086__episode_000094", + "recording_id": null, + "episode_id": 94, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. pre-approach\nC. contact\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
pre-approach" + } + ], + "images": [ + "images/task_00086_121_88_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00086", + "split": "eval", + "split_group_id": "task_00086__episode_000121", + "recording_id": null, + "episode_id": 121, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. approach\nC. contact\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. contact" + } + ], + "images": [ + "images/task_00086_129_151_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00086", + "split": "eval", + "split_group_id": "task_00086__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00086_129_239_camera_top.jpg", + "images/task_00086_129_242_camera_top.jpg", + "images/task_00086_129_245_camera_top.jpg", + "images/task_00086_129_248_camera_top.jpg", + "images/task_00086_129_251_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00086", + "split": "eval", + "split_group_id": "task_00086__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_00086_129_152_camera_top.jpg", + "images/task_00086_129_396_camera_top.jpg", + "images/task_00086_129_216_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00086", + "split": "eval", + "split_group_id": "task_00086__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/task_00086_129_314_camera_top.jpg", + "images/task_00086_129_223_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00086", + "split": "eval", + "split_group_id": "task_00086__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_00086_129_332_camera_top.jpg", + "images/task_00086_129_170_camera_top.jpg", + "images/task_00086_129_127_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00086", + "split": "eval", + "split_group_id": "task_00086__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00086_129_385_camera_top_t-6.jpg", + "images/task_00086_129_388_camera_top_t-3.jpg", + "images/task_00086_129_391_camera_top_t0.jpg", + "images/task_00086_129_394_camera_top_t+3.jpg", + "images/task_00086_129_397_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00086", + "split": "eval", + "split_group_id": "task_00086__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00086_129_288_camera_top.jpg", + "images/task_00086_129_291_camera_top.jpg", + "images/task_00086_129_294_camera_top.jpg", + "images/task_00086_129_297_camera_top.jpg", + "images/task_00086_129_300_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00086", + "split": "eval", + "split_group_id": "task_00086__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00086_129_171_camera_top.jpg", + "images/task_00086_129_174_camera_top.jpg", + "images/task_00086_129_177_camera_top.jpg", + "images/task_00086_129_180_camera_top.jpg", + "images/task_00086_129_183_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00086", + "split": "eval", + "split_group_id": "task_00086__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00086_129_133_camera_top_t-6.jpg", + "images/task_00086_129_136_camera_top_t-3.jpg", + "images/task_00086_129_139_camera_top_t0.jpg", + "images/task_00086_129_142_camera_top_t+3.jpg", + "images/task_00086_129_145_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00086", + "split": "eval", + "split_group_id": "task_00086__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. 
Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_00086_129_119_camera_top.jpg", + "images/task_00086_129_280_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00086", + "split": "eval", + "split_group_id": "task_00086__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nB. toward the right side of the scene\nA. toward the left side of the scene\nC. toward the bottom of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. toward the right side of the scene" + } + ], + "images": [ + "images/task_00086_129_276_camera_top.jpg", + "images/task_00086_129_281_camera_top.jpg", + "images/task_00086_129_286_camera_top.jpg", + "images/task_00086_129_291_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00086", + "split": "eval", + "split_group_id": "task_00086__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00087_26_366_camera_top.jpg", + "images/task_00087_26_369_camera_top.jpg", + "images/task_00087_26_372_camera_top.jpg", + "images/task_00087_26_375_camera_top.jpg", + "images/task_00087_26_378_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00087", + "split": "eval", + "split_group_id": "task_00087__episode_000026", + "recording_id": null, + "episode_id": 26, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nA. toward the left side of the scene\nB. toward the bottom of the scene\nC. toward the top of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. toward the left side of the scene" + } + ], + "images": [ + "images/task_00087_32_111_camera_top.jpg", + "images/task_00087_32_116_camera_top.jpg", + "images/task_00087_32_121_camera_top.jpg", + "images/task_00087_32_126_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00087", + "split": "eval", + "split_group_id": "task_00087__episode_000032", + "recording_id": null, + "episode_id": 32, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00087_45_238_camera_top.jpg", + "images/task_00087_45_241_camera_top.jpg", + "images/task_00087_45_244_camera_top.jpg", + "images/task_00087_45_247_camera_top.jpg", + "images/task_00087_45_250_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00087", + "split": "eval", + "split_group_id": "task_00087__episode_000045", + "recording_id": null, + "episode_id": 45, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. contact\nC. hold and carry\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. contact" + } + ], + "images": [ + "images/task_00087_90_87_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00087", + "split": "eval", + "split_group_id": "task_00087__episode_000090", + "recording_id": null, + "episode_id": 90, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nA. toward the right side of the scene\nB. toward the bottom of the scene\nC. toward the left side of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00087_98_456_camera_top.jpg", + "images/task_00087_98_461_camera_top.jpg", + "images/task_00087_98_466_camera_top.jpg", + "images/task_00087_98_471_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00087", + "split": "eval", + "split_group_id": "task_00087__episode_000098", + "recording_id": null, + "episode_id": 98, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00087_122_250_camera_top.jpg", + "images/task_00087_122_253_camera_top.jpg", + "images/task_00087_122_256_camera_top.jpg", + "images/task_00087_122_259_camera_top.jpg", + "images/task_00087_122_262_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00087", + "split": "eval", + "split_group_id": "task_00087__episode_000122", + "recording_id": null, + "episode_id": 122, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. release\nC. hold and carry\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
pre-approach" + } + ], + "images": [ + "images/task_00088_48_185_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00088", + "split": "eval", + "split_group_id": "task_00088__episode_000048", + "recording_id": null, + "episode_id": 48, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the left gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/task_00088_64_723_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00088", + "split": "eval", + "split_group_id": "task_00088__episode_000064", + "recording_id": null, + "episode_id": 64, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_00088_64_988_camera_top.jpg", + "images/task_00088_64_1251_camera_top.jpg", + "images/task_00088_64_264_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00088", + "split": "eval", + "split_group_id": "task_00088__episode_000064", + "recording_id": null, + "episode_id": 64, + "camera": "camera_top", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_00088_64_480_camera_top.jpg", + "images/task_00088_64_440_camera_top.jpg", + "images/task_00088_64_637_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00088", + "split": "eval", + "split_group_id": "task_00088__episode_000064", + "recording_id": null, + "episode_id": 64, + "camera": "camera_top", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_00088_64_481_camera_top.jpg", + "images/task_00088_64_265_camera_top.jpg", + "images/task_00088_64_686_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00088", + "split": "eval", + "split_group_id": "task_00088__episode_000064", + "recording_id": null, + "episode_id": 64, + "camera": "camera_top", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the left gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/task_00088_64_686_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00088", + "split": "eval", + "split_group_id": "task_00088__episode_000064", + "recording_id": null, + "episode_id": 64, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. 
only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. both arms are active" + } + ], + "images": [ + "images/task_00088_64_638_camera_top.jpg", + "images/task_00088_64_641_camera_top.jpg", + "images/task_00088_64_644_camera_top.jpg", + "images/task_00088_64_647_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00088", + "split": "eval", + "split_group_id": "task_00088__episode_000064", + "recording_id": null, + "episode_id": 64, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_00088_64_980_camera_top.jpg", + "images/task_00088_64_660_camera_top.jpg", + "images/task_00088_64_1236_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00088", + "split": "eval", + "split_group_id": "task_00088__episode_000064", + "recording_id": null, + "episode_id": 64, + "camera": "camera_top", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
only the right arm is active" + } + ], + "images": [ + "images/task_00088_64_1263_camera_top.jpg", + "images/task_00088_64_1266_camera_top.jpg", + "images/task_00088_64_1269_camera_top.jpg", + "images/task_00088_64_1272_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00088", + "split": "eval", + "split_group_id": "task_00088__episode_000064", + "recording_id": null, + "episode_id": 64, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. hold and carry\nC. release\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. transfer" + } + ], + "images": [ + "images/task_00088_68_783_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00088", + "split": "eval", + "split_group_id": "task_00088__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/task_00088_68_432_camera_top.jpg", + "images/task_00088_68_213_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00088", + "split": "eval", + "split_group_id": "task_00088__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_00088_68_222_camera_top.jpg", + "images/task_00088_68_354_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00088", + "split": "eval", + "split_group_id": "task_00088__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nA. toward the right side of the scene\nB. toward the left side of the scene\nC. toward the bottom of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00088_68_679_camera_top.jpg", + "images/task_00088_68_684_camera_top.jpg", + "images/task_00088_68_689_camera_top.jpg", + "images/task_00088_68_694_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00088", + "split": "eval", + "split_group_id": "task_00088__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. release\nC. contact\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. approach" + } + ], + "images": [ + "images/task_00088_68_806_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00088", + "split": "eval", + "split_group_id": "task_00088__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nD. toward the left side of the scene\nA. toward the right side of the scene\nB. toward the top of the scene\nC. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
toward the left side of the scene" + } + ], + "images": [ + "images/task_00088_68_890_camera_top.jpg", + "images/task_00088_68_895_camera_top.jpg", + "images/task_00088_68_900_camera_top.jpg", + "images/task_00088_68_905_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00088", + "split": "eval", + "split_group_id": "task_00088__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00088_68_718_camera_top_t-6.jpg", + "images/task_00088_68_721_camera_top_t-3.jpg", + "images/task_00088_68_724_camera_top_t0.jpg", + "images/task_00088_68_727_camera_top_t+3.jpg", + "images/task_00088_68_730_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00088", + "split": "eval", + "split_group_id": "task_00088__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. transfer\nC. hold and carry\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
pre-approach" + } + ], + "images": [ + "images/task_00088_103_198_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00088", + "split": "eval", + "split_group_id": "task_00088__episode_000103", + "recording_id": null, + "episode_id": 103, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. contact\nC. pre-approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. pre-approach" + } + ], + "images": [ + "images/task_00089_19_666_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00089", + "split": "eval", + "split_group_id": "task_00089__episode_000019", + "recording_id": null, + "episode_id": 19, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the right side of the scene\nA. toward the left side of the scene\nC. toward the bottom of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00089_23_845_camera_top.jpg", + "images/task_00089_23_850_camera_top.jpg", + "images/task_00089_23_855_camera_top.jpg", + "images/task_00089_23_860_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00089", + "split": "eval", + "split_group_id": "task_00089__episode_000023", + "recording_id": null, + "episode_id": 23, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00089_42_307_camera_top.jpg", + "images/task_00089_42_310_camera_top.jpg", + "images/task_00089_42_313_camera_top.jpg", + "images/task_00089_42_316_camera_top.jpg", + "images/task_00089_42_319_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00089", + "split": "eval", + "split_group_id": "task_00089__episode_000042", + "recording_id": null, + "episode_id": 42, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00089_109_332_camera_top.jpg", + "images/task_00089_109_335_camera_top.jpg", + "images/task_00089_109_338_camera_top.jpg", + "images/task_00089_109_341_camera_top.jpg", + "images/task_00089_109_344_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00089", + "split": "eval", + "split_group_id": "task_00089__episode_000109", + "recording_id": null, + "episode_id": 109, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. approach\nC. hold and carry\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. hold and carry" + } + ], + "images": [ + "images/task_00089_115_191_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00089", + "split": "eval", + "split_group_id": "task_00089__episode_000115", + "recording_id": null, + "episode_id": 115, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. release\nC. pre-approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
hold and carry" + } + ], + "images": [ + "images/task_00089_132_283_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00089", + "split": "eval", + "split_group_id": "task_00089__episode_000132", + "recording_id": null, + "episode_id": 132, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. approach\nC. release\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. approach" + } + ], + "images": [ + "images/task_00089_142_619_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00089", + "split": "eval", + "split_group_id": "task_00089__episode_000142", + "recording_id": null, + "episode_id": 142, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. transfer\nC. pre-approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. pre-approach" + } + ], + "images": [ + "images/task_00089_142_112_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00089", + "split": "eval", + "split_group_id": "task_00089__episode_000142", + "recording_id": null, + "episode_id": 142, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the bottom of the scene\nB. toward the top of the scene\nC. toward the left side of the scene\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "D. toward the right side of the scene" + } + ], + "images": [ + "images/task_00089_142_744_camera_top.jpg", + "images/task_00089_142_749_camera_top.jpg", + "images/task_00089_142_754_camera_top.jpg", + "images/task_00089_142_759_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00089", + "split": "eval", + "split_group_id": "task_00089__episode_000142", + "recording_id": null, + "episode_id": 142, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nA. toward the left side of the scene\nB. toward the top of the scene\nC. toward the right side of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. toward the left side of the scene" + } + ], + "images": [ + "images/task_00089_142_684_camera_top.jpg", + "images/task_00089_142_689_camera_top.jpg", + "images/task_00089_142_694_camera_top.jpg", + "images/task_00089_142_699_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00089", + "split": "eval", + "split_group_id": "task_00089__episode_000142", + "recording_id": null, + "episode_id": 142, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00089_142_867_camera_top_t-6.jpg", + "images/task_00089_142_870_camera_top_t-3.jpg", + "images/task_00089_142_873_camera_top_t0.jpg", + "images/task_00089_142_876_camera_top_t+3.jpg", + "images/task_00089_142_879_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00089", + "split": "eval", + "split_group_id": "task_00089__episode_000142", + "recording_id": null, + "episode_id": 142, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_00089_142_724_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00089", + "split": "eval", + "split_group_id": "task_00089__episode_000142", + "recording_id": null, + "episode_id": 142, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_00089_142_794_camera_top.jpg", + "images/task_00089_142_190_camera_top.jpg", + "images/task_00089_142_378_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00089", + "split": "eval", + "split_group_id": "task_00089__episode_000142", + "recording_id": null, + "episode_id": 142, + "camera": "camera_top", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the left side of the scene\nA. toward the bottom of the scene\nB. toward the top of the scene\nC. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. toward the left side of the scene" + } + ], + "images": [ + "images/task_00089_142_281_camera_top.jpg", + "images/task_00089_142_286_camera_top.jpg", + "images/task_00089_142_291_camera_top.jpg", + "images/task_00089_142_296_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00089", + "split": "eval", + "split_group_id": "task_00089__episode_000142", + "recording_id": null, + "episode_id": 142, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00089_142_799_camera_top_t-6.jpg", + "images/task_00089_142_802_camera_top_t-3.jpg", + "images/task_00089_142_805_camera_top_t0.jpg", + "images/task_00089_142_808_camera_top_t+3.jpg", + "images/task_00089_142_811_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00089", + "split": "eval", + "split_group_id": "task_00089__episode_000142", + "recording_id": null, + "episode_id": 142, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nA. toward the right side of the scene\nB. toward the top of the scene\nC. toward the bottom of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. toward the right side of the scene" + } + ], + "images": [ + "images/task_00090_2_1706_camera_top.jpg", + "images/task_00090_2_1711_camera_top.jpg", + "images/task_00090_2_1716_camera_top.jpg", + "images/task_00090_2_1721_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00090", + "split": "eval", + "split_group_id": "task_00090__episode_000002", + "recording_id": null, + "episode_id": 2, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nD. toward the left side of the scene\nA. toward the right side of the scene\nB. toward the top of the scene\nC. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
toward the left side of the scene" + } + ], + "images": [ + "images/task_00091_39_584_camera_top.jpg", + "images/task_00091_39_589_camera_top.jpg", + "images/task_00091_39_594_camera_top.jpg", + "images/task_00091_39_599_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00091", + "split": "eval", + "split_group_id": "task_00091__episode_000039", + "recording_id": null, + "episode_id": 39, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. pre-approach\nC. approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. approach" + } + ], + "images": [ + "images/task_00091_48_164_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00091", + "split": "eval", + "split_group_id": "task_00091__episode_000048", + "recording_id": null, + "episode_id": 48, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00091_53_334_camera_top.jpg", + "images/task_00091_53_337_camera_top.jpg", + "images/task_00091_53_340_camera_top.jpg", + "images/task_00091_53_343_camera_top.jpg", + "images/task_00091_53_346_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00091", + "split": "eval", + "split_group_id": "task_00091__episode_000053", + "recording_id": null, + "episode_id": 53, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00091_59_238_camera_top.jpg", + "images/task_00091_59_241_camera_top.jpg", + "images/task_00091_59_244_camera_top.jpg", + "images/task_00091_59_247_camera_top.jpg", + "images/task_00091_59_250_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00091", + "split": "eval", + "split_group_id": "task_00091__episode_000059", + "recording_id": null, + "episode_id": 59, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. release\nC. approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
hold and carry" + } + ], + "images": [ + "images/task_00091_70_993_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00091", + "split": "eval", + "split_group_id": "task_00091__episode_000070", + "recording_id": null, + "episode_id": 70, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_00092_5_687_camera_top.jpg", + "images/task_00092_5_568_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00092", + "split": "eval", + "split_group_id": "task_00092__episode_000005", + "recording_id": null, + "episode_id": 5, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_00092_5_875_camera_top.jpg", + "images/task_00092_5_707_camera_top.jpg", + "images/task_00092_5_210_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00092", + "split": "eval", + "split_group_id": "task_00092__episode_000005", + "recording_id": null, + "episode_id": 5, + "camera": "camera_top", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_00092_5_199_camera_top.jpg", + "images/task_00092_5_289_camera_top.jpg", + "images/task_00092_5_733_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00092", + "split": "eval", + "split_group_id": "task_00092__episode_000005", + "recording_id": null, + "episode_id": 5, + "camera": "camera_top", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_00092_5_371_camera_top.jpg", + "images/task_00092_5_659_camera_top.jpg", + "images/task_00092_5_786_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00092", + "split": "eval", + "split_group_id": "task_00092__episode_000005", + "recording_id": null, + "episode_id": 5, + "camera": "camera_top", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nD. toward the left side of the scene\nA. 
toward the top of the scene\nB. toward the right side of the scene\nC. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. toward the left side of the scene" + } + ], + "images": [ + "images/task_00092_5_896_camera_top.jpg", + "images/task_00092_5_901_camera_top.jpg", + "images/task_00092_5_906_camera_top.jpg", + "images/task_00092_5_911_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00092", + "split": "eval", + "split_group_id": "task_00092__episode_000005", + "recording_id": null, + "episode_id": 5, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nA. toward the bottom of the scene\nB. toward the right side of the scene\nC. toward the top of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. toward the bottom of the scene" + } + ], + "images": [ + "images/task_00092_5_567_camera_top.jpg", + "images/task_00092_5_572_camera_top.jpg", + "images/task_00092_5_577_camera_top.jpg", + "images/task_00092_5_582_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00092", + "split": "eval", + "split_group_id": "task_00092__episode_000005", + "recording_id": null, + "episode_id": 5, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00092_5_695_camera_top.jpg", + "images/task_00092_5_698_camera_top.jpg", + "images/task_00092_5_701_camera_top.jpg", + "images/task_00092_5_704_camera_top.jpg", + "images/task_00092_5_707_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00092", + "split": "eval", + "split_group_id": "task_00092__episode_000005", + "recording_id": null, + "episode_id": 5, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00092_5_233_camera_top.jpg", + "images/task_00092_5_236_camera_top.jpg", + "images/task_00092_5_239_camera_top.jpg", + "images/task_00092_5_242_camera_top.jpg", + "images/task_00092_5_245_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00092", + "split": "eval", + "split_group_id": "task_00092__episode_000005", + "recording_id": null, + "episode_id": 5, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nC. toward the right side of the scene\nA. toward the bottom of the scene\nB. toward the left side of the scene\nD. toward the top of the scene\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "C. toward the right side of the scene" + } + ], + "images": [ + "images/task_00092_67_276_camera_top.jpg", + "images/task_00092_67_281_camera_top.jpg", + "images/task_00092_67_286_camera_top.jpg", + "images/task_00092_67_291_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00092", + "split": "eval", + "split_group_id": "task_00092__episode_000067", + "recording_id": null, + "episode_id": 67, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. transfer\nC. release\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. contact" + } + ], + "images": [ + "images/task_00092_102_733_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00092", + "split": "eval", + "split_group_id": "task_00092__episode_000102", + "recording_id": null, + "episode_id": 102, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00092_110_289_camera_top.jpg", + "images/task_00092_110_292_camera_top.jpg", + "images/task_00092_110_295_camera_top.jpg", + "images/task_00092_110_298_camera_top.jpg", + "images/task_00092_110_301_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00092", + "split": "eval", + "split_group_id": "task_00092__episode_000110", + "recording_id": null, + "episode_id": 110, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nC. toward the right side of the scene\nA. toward the left side of the scene\nB. toward the top of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. toward the right side of the scene" + } + ], + "images": [ + "images/task_00093_0_456_camera_top.jpg", + "images/task_00093_0_461_camera_top.jpg", + "images/task_00093_0_466_camera_top.jpg", + "images/task_00093_0_471_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00093", + "split": "eval", + "split_group_id": "task_00093__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. pre-approach\nC. transfer\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
pre-approach" + } + ], + "images": [ + "images/task_00093_23_638_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00093", + "split": "eval", + "split_group_id": "task_00093__episode_000023", + "recording_id": null, + "episode_id": 23, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. approach\nC. contact\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. pre-approach" + } + ], + "images": [ + "images/task_00093_71_459_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00093", + "split": "eval", + "split_group_id": "task_00093__episode_000071", + "recording_id": null, + "episode_id": 71, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. contact\nC. approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. pre-approach" + } + ], + "images": [ + "images/task_00093_119_444_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00093", + "split": "eval", + "split_group_id": "task_00093__episode_000119", + "recording_id": null, + "episode_id": 119, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the left side of the scene\nB. toward the top of the scene\nC. toward the bottom of the scene\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "D. toward the right side of the scene" + } + ], + "images": [ + "images/task_00093_122_492_camera_top.jpg", + "images/task_00093_122_497_camera_top.jpg", + "images/task_00093_122_502_camera_top.jpg", + "images/task_00093_122_507_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00093", + "split": "eval", + "split_group_id": "task_00093__episode_000122", + "recording_id": null, + "episode_id": 122, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. only the right arm is active" + } + ], + "images": [ + "images/task_00094_149_272_camera_top.jpg", + "images/task_00094_149_275_camera_top.jpg", + "images/task_00094_149_278_camera_top.jpg", + "images/task_00094_149_281_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00094", + "split": "eval", + "split_group_id": "task_00094__episode_000149", + "recording_id": null, + "episode_id": 149, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. release\nC. hold and carry\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
transfer" + } + ], + "images": [ + "images/task_00094_149_583_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00094", + "split": "eval", + "split_group_id": "task_00094__episode_000149", + "recording_id": null, + "episode_id": 149, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nC. toward the bottom of the scene\nA. toward the top of the scene\nB. toward the left side of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. toward the bottom of the scene" + } + ], + "images": [ + "images/task_00094_149_790_camera_top.jpg", + "images/task_00094_149_795_camera_top.jpg", + "images/task_00094_149_800_camera_top.jpg", + "images/task_00094_149_805_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00094", + "split": "eval", + "split_group_id": "task_00094__episode_000149", + "recording_id": null, + "episode_id": 149, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
both arms are idle" + } + ], + "images": [ + "images/task_00094_149_616_camera_top.jpg", + "images/task_00094_149_619_camera_top.jpg", + "images/task_00094_149_622_camera_top.jpg", + "images/task_00094_149_625_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00094", + "split": "eval", + "split_group_id": "task_00094__episode_000149", + "recording_id": null, + "episode_id": 149, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. contact\nC. transfer\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. transfer" + } + ], + "images": [ + "images/task_00094_149_336_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00094", + "split": "eval", + "split_group_id": "task_00094__episode_000149", + "recording_id": null, + "episode_id": 149, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00094_149_361_camera_top_t-6.jpg", + "images/task_00094_149_364_camera_top_t-3.jpg", + "images/task_00094_149_367_camera_top_t0.jpg", + "images/task_00094_149_370_camera_top_t+3.jpg", + "images/task_00094_149_373_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00094", + "split": "eval", + "split_group_id": "task_00094__episode_000149", + "recording_id": null, + "episode_id": 149, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. both arms are active" + } + ], + "images": [ + "images/task_00094_149_855_camera_top.jpg", + "images/task_00094_149_858_camera_top.jpg", + "images/task_00094_149_861_camera_top.jpg", + "images/task_00094_149_864_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00094", + "split": "eval", + "split_group_id": "task_00094__episode_000149", + "recording_id": null, + "episode_id": 149, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
both arms are idle" + } + ], + "images": [ + "images/task_00094_149_186_camera_top.jpg", + "images/task_00094_149_189_camera_top.jpg", + "images/task_00094_149_192_camera_top.jpg", + "images/task_00094_149_195_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00094", + "split": "eval", + "split_group_id": "task_00094__episode_000149", + "recording_id": null, + "episode_id": 149, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_00094_149_363_camera_top.jpg", + "images/task_00094_149_157_camera_top.jpg", + "images/task_00094_149_188_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00094", + "split": "eval", + "split_group_id": "task_00094__episode_000149", + "recording_id": null, + "episode_id": 149, + "camera": "camera_top", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/task_00095_18_415_camera_top.jpg", + "images/task_00095_18_331_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00095", + "split": "eval", + "split_group_id": "task_00095__episode_000018", + "recording_id": null, + "episode_id": 18, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_00095_18_364_camera_top.jpg", + "images/task_00095_18_497_camera_top.jpg", + "images/task_00095_18_953_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00095", + "split": "eval", + "split_group_id": "task_00095__episode_000018", + "recording_id": null, + "episode_id": 18, + "camera": "camera_top", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00095_18_1047_camera_top_t-6.jpg", + "images/task_00095_18_1044_camera_top_t-3.jpg", + "images/task_00095_18_1047_camera_top_t0.jpg", + "images/task_00095_18_1050_camera_top_t+3.jpg", + "images/task_00095_18_1053_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00095", + "split": "eval", + "split_group_id": "task_00095__episode_000018", + "recording_id": null, + "episode_id": 18, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_00095_18_648_camera_top.jpg", + "images/task_00095_18_419_camera_top.jpg", + "images/task_00095_18_960_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00095", + "split": "eval", + "split_group_id": "task_00095__episode_000018", + "recording_id": null, + "episode_id": 18, + "camera": "camera_top", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_00095_18_1191_camera_top.jpg", + "images/task_00095_18_1068_camera_top.jpg", + "images/task_00095_18_623_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00095", + "split": "eval", + "split_group_id": "task_00095__episode_000018", + "recording_id": null, + "episode_id": 18, + "camera": "camera_top", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the left side of the scene\nA. toward the right side of the scene\nB. toward the bottom of the scene\nC. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. toward the left side of the scene" + } + ], + "images": [ + "images/task_00095_18_849_camera_top.jpg", + "images/task_00095_18_854_camera_top.jpg", + "images/task_00095_18_859_camera_top.jpg", + "images/task_00095_18_864_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00095", + "split": "eval", + "split_group_id": "task_00095__episode_000018", + "recording_id": null, + "episode_id": 18, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00095_18_493_camera_top_t-6.jpg", + "images/task_00095_18_496_camera_top_t-3.jpg", + "images/task_00095_18_499_camera_top_t0.jpg", + "images/task_00095_18_502_camera_top_t+3.jpg", + "images/task_00095_18_505_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00095", + "split": "eval", + "split_group_id": "task_00095__episode_000018", + "recording_id": null, + "episode_id": 18, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_00095_36_251_camera_top.jpg", + "images/task_00095_36_853_camera_top.jpg", + "images/task_00095_36_316_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00095", + "split": "eval", + "split_group_id": "task_00095__episode_000036", + "recording_id": null, + "episode_id": 36, + "camera": "camera_top", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. hold and carry\nC. release\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
hold and carry" + } + ], + "images": [ + "images/task_00095_36_587_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00095", + "split": "eval", + "split_group_id": "task_00095__episode_000036", + "recording_id": null, + "episode_id": 36, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_00095_36_254_camera_top.jpg", + "images/task_00095_36_453_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00095", + "split": "eval", + "split_group_id": "task_00095__episode_000036", + "recording_id": null, + "episode_id": 36, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the left side of the scene\nA. toward the bottom of the scene\nB. toward the right side of the scene\nC. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
toward the left side of the scene" + } + ], + "images": [ + "images/task_00095_36_713_camera_top.jpg", + "images/task_00095_36_718_camera_top.jpg", + "images/task_00095_36_723_camera_top.jpg", + "images/task_00095_36_728_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00095", + "split": "eval", + "split_group_id": "task_00095__episode_000036", + "recording_id": null, + "episode_id": 36, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00095_36_440_camera_top.jpg", + "images/task_00095_36_443_camera_top.jpg", + "images/task_00095_36_446_camera_top.jpg", + "images/task_00095_36_449_camera_top.jpg", + "images/task_00095_36_452_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00095", + "split": "eval", + "split_group_id": "task_00095__episode_000036", + "recording_id": null, + "episode_id": 36, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the left side of the scene\nA. toward the top of the scene\nB. toward the bottom of the scene\nC. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
toward the left side of the scene" + } + ], + "images": [ + "images/task_00095_36_706_camera_top.jpg", + "images/task_00095_36_711_camera_top.jpg", + "images/task_00095_36_716_camera_top.jpg", + "images/task_00095_36_721_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00095", + "split": "eval", + "split_group_id": "task_00095__episode_000036", + "recording_id": null, + "episode_id": 36, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the right side of the scene\nA. toward the left side of the scene\nC. toward the bottom of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. toward the right side of the scene" + } + ], + "images": [ + "images/task_00095_36_1107_camera_top.jpg", + "images/task_00095_36_1112_camera_top.jpg", + "images/task_00095_36_1117_camera_top.jpg", + "images/task_00095_36_1122_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00095", + "split": "eval", + "split_group_id": "task_00095__episode_000036", + "recording_id": null, + "episode_id": 36, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the top of the scene\nB. toward the bottom of the scene\nC. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00095_36_1113_camera_top.jpg", + "images/task_00095_36_1118_camera_top.jpg", + "images/task_00095_36_1123_camera_top.jpg", + "images/task_00095_36_1128_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00095", + "split": "eval", + "split_group_id": "task_00095__episode_000036", + "recording_id": null, + "episode_id": 36, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_00095_94_804_camera_top.jpg", + "images/task_00095_94_483_camera_top.jpg", + "images/task_00095_94_227_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00095", + "split": "eval", + "split_group_id": "task_00095__episode_000094", + "recording_id": null, + "episode_id": 94, + "camera": "camera_top", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00095_179_491_camera_top.jpg", + "images/task_00095_179_494_camera_top.jpg", + "images/task_00095_179_497_camera_top.jpg", + "images/task_00095_179_500_camera_top.jpg", + "images/task_00095_179_503_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00095", + "split": "eval", + "split_group_id": "task_00095__episode_000179", + "recording_id": null, + "episode_id": 179, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_00095_179_742_camera_top.jpg", + "images/task_00095_179_462_camera_top.jpg", + "images/task_00095_179_904_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00095", + "split": "eval", + "split_group_id": "task_00095__episode_000179", + "recording_id": null, + "episode_id": 179, + "camera": "camera_top", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_00095_179_924_camera_top.jpg", + "images/task_00095_179_605_camera_top.jpg", + "images/task_00095_179_758_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00095", + "split": "eval", + "split_group_id": "task_00095__episode_000179", + "recording_id": null, + "episode_id": 179, + "camera": "camera_top", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_00095_179_764_camera_top.jpg", + "images/task_00095_179_691_camera_top.jpg", + "images/task_00095_179_821_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00095", + "split": "eval", + "split_group_id": "task_00095__episode_000179", + "recording_id": null, + "episode_id": 179, + "camera": "camera_top", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_00095_179_454_camera_top.jpg", + "images/task_00095_179_824_camera_top.jpg", + "images/task_00095_179_695_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00095", + "split": "eval", + "split_group_id": "task_00095__episode_000179", + "recording_id": null, + "episode_id": 179, + "camera": "camera_top", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. transfer\nC. pre-approach\nD. 
release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. release" + } + ], + "images": [ + "images/task_00095_179_974_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00095", + "split": "eval", + "split_group_id": "task_00095__episode_000179", + "recording_id": null, + "episode_id": 179, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the left side of the scene\nA. toward the right side of the scene\nC. toward the top of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. toward the left side of the scene" + } + ], + "images": [ + "images/task_00095_179_426_camera_top.jpg", + "images/task_00095_179_431_camera_top.jpg", + "images/task_00095_179_436_camera_top.jpg", + "images/task_00095_179_441_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00095", + "split": "eval", + "split_group_id": "task_00095__episode_000179", + "recording_id": null, + "episode_id": 179, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. release\nC. contact\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
contact" + } + ], + "images": [ + "images/task_00095_179_864_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00095", + "split": "eval", + "split_group_id": "task_00095__episode_000179", + "recording_id": null, + "episode_id": 179, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_00095_179_612_camera_top.jpg", + "images/task_00095_179_428_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00095", + "split": "eval", + "split_group_id": "task_00095__episode_000179", + "recording_id": null, + "episode_id": 179, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_00096_143_301_camera_top.jpg", + "images/task_00096_143_257_camera_top.jpg", + "images/task_00096_143_828_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00096", + "split": "eval", + "split_group_id": "task_00096__episode_000143", + "recording_id": null, + "episode_id": 143, + "camera": "camera_top", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00096_143_665_camera_top.jpg", + "images/task_00096_143_668_camera_top.jpg", + "images/task_00096_143_671_camera_top.jpg", + "images/task_00096_143_674_camera_top.jpg", + "images/task_00096_143_677_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00096", + "split": "eval", + "split_group_id": "task_00096__episode_000143", + "recording_id": null, + "episode_id": 143, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the right side of the scene\nA. toward the bottom of the scene\nC. toward the top of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00096_143_1064_camera_top.jpg", + "images/task_00096_143_1069_camera_top.jpg", + "images/task_00096_143_1074_camera_top.jpg", + "images/task_00096_143_1079_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00096", + "split": "eval", + "split_group_id": "task_00096__episode_000143", + "recording_id": null, + "episode_id": 143, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nD. toward the bottom of the scene\nA. toward the top of the scene\nB. toward the left side of the scene\nC. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. toward the bottom of the scene" + } + ], + "images": [ + "images/task_00096_143_305_camera_top.jpg", + "images/task_00096_143_310_camera_top.jpg", + "images/task_00096_143_315_camera_top.jpg", + "images/task_00096_143_320_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00096", + "split": "eval", + "split_group_id": "task_00096__episode_000143", + "recording_id": null, + "episode_id": 143, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00096_143_462_camera_top_t-6.jpg", + "images/task_00096_143_465_camera_top_t-3.jpg", + "images/task_00096_143_468_camera_top_t0.jpg", + "images/task_00096_143_471_camera_top_t+3.jpg", + "images/task_00096_143_474_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00096", + "split": "eval", + "split_group_id": "task_00096__episode_000143", + "recording_id": null, + "episode_id": 143, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_00096_143_332_camera_top.jpg", + "images/task_00096_143_1184_camera_top.jpg", + "images/task_00096_143_917_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00096", + "split": "eval", + "split_group_id": "task_00096__episode_000143", + "recording_id": null, + "episode_id": 143, + "camera": "camera_top", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nA. toward the right side of the scene\nB. toward the top of the scene\nC. toward the left side of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00096_143_1026_camera_top.jpg", + "images/task_00096_143_1031_camera_top.jpg", + "images/task_00096_143_1036_camera_top.jpg", + "images/task_00096_143_1041_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00096", + "split": "eval", + "split_group_id": "task_00096__episode_000143", + "recording_id": null, + "episode_id": 143, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00096_143_387_camera_top.jpg", + "images/task_00096_143_390_camera_top.jpg", + "images/task_00096_143_393_camera_top.jpg", + "images/task_00096_143_396_camera_top.jpg", + "images/task_00096_143_399_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00096", + "split": "eval", + "split_group_id": "task_00096__episode_000143", + "recording_id": null, + "episode_id": 143, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. approach\nC. pre-approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
pre-approach" + } + ], + "images": [ + "images/task_00096_143_225_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00096", + "split": "eval", + "split_group_id": "task_00096__episode_000143", + "recording_id": null, + "episode_id": 143, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00097_45_466_camera_top.jpg", + "images/task_00097_45_469_camera_top.jpg", + "images/task_00097_45_472_camera_top.jpg", + "images/task_00097_45_475_camera_top.jpg", + "images/task_00097_45_478_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00097", + "split": "eval", + "split_group_id": "task_00097__episode_000045", + "recording_id": null, + "episode_id": 45, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the right side of the scene\nA. toward the bottom of the scene\nC. toward the top of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00097_59_746_camera_top.jpg", + "images/task_00097_59_751_camera_top.jpg", + "images/task_00097_59_756_camera_top.jpg", + "images/task_00097_59_761_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00097", + "split": "eval", + "split_group_id": "task_00097__episode_000059", + "recording_id": null, + "episode_id": 59, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00097_59_740_camera_top_t-6.jpg", + "images/task_00097_59_743_camera_top_t-3.jpg", + "images/task_00097_59_746_camera_top_t0.jpg", + "images/task_00097_59_749_camera_top_t+3.jpg", + "images/task_00097_59_752_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00097", + "split": "eval", + "split_group_id": "task_00097__episode_000059", + "recording_id": null, + "episode_id": 59, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. hold and carry\nC. transfer\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
contact" + } + ], + "images": [ + "images/task_00097_59_1_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00097", + "split": "eval", + "split_group_id": "task_00097__episode_000059", + "recording_id": null, + "episode_id": 59, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00097_59_304_camera_top.jpg", + "images/task_00097_59_307_camera_top.jpg", + "images/task_00097_59_310_camera_top.jpg", + "images/task_00097_59_313_camera_top.jpg", + "images/task_00097_59_316_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00097", + "split": "eval", + "split_group_id": "task_00097__episode_000059", + "recording_id": null, + "episode_id": 59, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00097_59_780_camera_top.jpg", + "images/task_00097_59_783_camera_top.jpg", + "images/task_00097_59_786_camera_top.jpg", + "images/task_00097_59_789_camera_top.jpg", + "images/task_00097_59_792_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00097", + "split": "eval", + "split_group_id": "task_00097__episode_000059", + "recording_id": null, + "episode_id": 59, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_00097_59_392_camera_top.jpg", + "images/task_00097_59_825_camera_top.jpg", + "images/task_00097_59_433_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00097", + "split": "eval", + "split_group_id": "task_00097__episode_000059", + "recording_id": null, + "episode_id": 59, + "camera": "camera_top", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_00097_59_420_camera_top.jpg", + "images/task_00097_59_834_camera_top.jpg", + "images/task_00097_59_751_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00097", + "split": "eval", + "split_group_id": "task_00097__episode_000059", + "recording_id": null, + "episode_id": 59, + "camera": "camera_top", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_00097_61_106_camera_top.jpg", + "images/task_00097_61_247_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00097", + "split": "eval", + "split_group_id": "task_00097__episode_000061", + "recording_id": null, + "episode_id": 61, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the left gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — no contact" + } + ], + "images": [ + "images/task_00097_61_501_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00097", + "split": "eval", + "split_group_id": "task_00097__episode_000061", + "recording_id": null, + "episode_id": 61, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00097_61_143_camera_top.jpg", + "images/task_00097_61_146_camera_top.jpg", + "images/task_00097_61_149_camera_top.jpg", + "images/task_00097_61_152_camera_top.jpg", + "images/task_00097_61_155_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00097", + "split": "eval", + "split_group_id": "task_00097__episode_000061", + "recording_id": null, + "episode_id": 61, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. hold and carry\nC. approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
approach" + } + ], + "images": [ + "images/task_00097_61_487_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00097", + "split": "eval", + "split_group_id": "task_00097__episode_000061", + "recording_id": null, + "episode_id": 61, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. approach\nC. hold and carry\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. contact" + } + ], + "images": [ + "images/task_00097_61_4_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00097", + "split": "eval", + "split_group_id": "task_00097__episode_000061", + "recording_id": null, + "episode_id": 61, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the bottom of the scene\nB. toward the left side of the scene\nC. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00097_61_219_camera_top.jpg", + "images/task_00097_61_224_camera_top.jpg", + "images/task_00097_61_229_camera_top.jpg", + "images/task_00097_61_234_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00097", + "split": "eval", + "split_group_id": "task_00097__episode_000061", + "recording_id": null, + "episode_id": 61, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the left gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/task_00097_61_597_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00097", + "split": "eval", + "split_group_id": "task_00097__episode_000061", + "recording_id": null, + "episode_id": 61, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. transfer\nC. release\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. release" + } + ], + "images": [ + "images/task_00097_61_693_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00097", + "split": "eval", + "split_group_id": "task_00097__episode_000061", + "recording_id": null, + "episode_id": 61, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the left gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. 
No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/task_00097_61_356_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00097", + "split": "eval", + "split_group_id": "task_00097__episode_000061", + "recording_id": null, + "episode_id": 61, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. transfer\nC. release\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. transfer" + } + ], + "images": [ + "images/task_00097_78_365_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00097", + "split": "eval", + "split_group_id": "task_00097__episode_000078", + "recording_id": null, + "episode_id": 78, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00097_92_743_camera_top_t-6.jpg", + "images/task_00097_92_746_camera_top_t-3.jpg", + "images/task_00097_92_749_camera_top_t0.jpg", + "images/task_00097_92_752_camera_top_t+3.jpg", + "images/task_00097_92_755_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00097", + "split": "eval", + "split_group_id": "task_00097__episode_000092", + "recording_id": null, + "episode_id": 92, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. hold and carry\nC. contact\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. transfer" + } + ], + "images": [ + "images/task_00097_92_490_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00097", + "split": "eval", + "split_group_id": "task_00097__episode_000092", + "recording_id": null, + "episode_id": 92, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00097_92_234_camera_top.jpg", + "images/task_00097_92_237_camera_top.jpg", + "images/task_00097_92_240_camera_top.jpg", + "images/task_00097_92_243_camera_top.jpg", + "images/task_00097_92_246_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00097", + "split": "eval", + "split_group_id": "task_00097__episode_000092", + "recording_id": null, + "episode_id": 92, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nA. toward the right side of the scene\nB. toward the bottom of the scene\nC. toward the left side of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. toward the right side of the scene" + } + ], + "images": [ + "images/task_00097_92_733_camera_top.jpg", + "images/task_00097_92_738_camera_top.jpg", + "images/task_00097_92_743_camera_top.jpg", + "images/task_00097_92_748_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00097", + "split": "eval", + "split_group_id": "task_00097__episode_000092", + "recording_id": null, + "episode_id": 92, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/task_00097_92_469_camera_top.jpg", + "images/task_00097_92_610_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00097", + "split": "eval", + "split_group_id": "task_00097__episode_000092", + "recording_id": null, + "episode_id": 92, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_00097_92_569_camera_top.jpg", + "images/task_00097_92_672_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00097", + "split": "eval", + "split_group_id": "task_00097__episode_000092", + "recording_id": null, + "episode_id": 92, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nC. toward the right side of the scene\nA. toward the bottom of the scene\nB. toward the top of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00097_92_368_camera_top.jpg", + "images/task_00097_92_373_camera_top.jpg", + "images/task_00097_92_378_camera_top.jpg", + "images/task_00097_92_383_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00097", + "split": "eval", + "split_group_id": "task_00097__episode_000092", + "recording_id": null, + "episode_id": 92, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nC. toward the left side of the scene\nA. toward the right side of the scene\nB. toward the top of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. toward the left side of the scene" + } + ], + "images": [ + "images/task_00097_104_630_camera_top.jpg", + "images/task_00097_104_635_camera_top.jpg", + "images/task_00097_104_640_camera_top.jpg", + "images/task_00097_104_645_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00097", + "split": "eval", + "split_group_id": "task_00097__episode_000104", + "recording_id": null, + "episode_id": 104, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00097_104_618_camera_top_t-6.jpg", + "images/task_00097_104_621_camera_top_t-3.jpg", + "images/task_00097_104_624_camera_top_t0.jpg", + "images/task_00097_104_627_camera_top_t+3.jpg", + "images/task_00097_104_630_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00097", + "split": "eval", + "split_group_id": "task_00097__episode_000104", + "recording_id": null, + "episode_id": 104, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_00097_104_90_camera_top.jpg", + "images/task_00097_104_298_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00097", + "split": "eval", + "split_group_id": "task_00097__episode_000104", + "recording_id": null, + "episode_id": 104, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00097_104_364_camera_top.jpg", + "images/task_00097_104_367_camera_top.jpg", + "images/task_00097_104_370_camera_top.jpg", + "images/task_00097_104_373_camera_top.jpg", + "images/task_00097_104_376_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00097", + "split": "eval", + "split_group_id": "task_00097__episode_000104", + "recording_id": null, + "episode_id": 104, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. approach\nC. transfer\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. approach" + } + ], + "images": [ + "images/task_00097_104_67_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00097", + "split": "eval", + "split_group_id": "task_00097__episode_000104", + "recording_id": null, + "episode_id": 104, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. pre-approach\nC. release\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
release" + } + ], + "images": [ + "images/task_00097_104_534_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00097", + "split": "eval", + "split_group_id": "task_00097__episode_000104", + "recording_id": null, + "episode_id": 104, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00097_104_146_camera_top.jpg", + "images/task_00097_104_149_camera_top.jpg", + "images/task_00097_104_152_camera_top.jpg", + "images/task_00097_104_155_camera_top.jpg", + "images/task_00097_104_158_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00097", + "split": "eval", + "split_group_id": "task_00097__episode_000104", + "recording_id": null, + "episode_id": 104, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. transfer\nC. release\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
transfer" + } + ], + "images": [ + "images/task_00097_104_459_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00097", + "split": "eval", + "split_group_id": "task_00097__episode_000104", + "recording_id": null, + "episode_id": 104, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_00097_104_566_camera_top.jpg", + "images/task_00097_104_90_camera_top.jpg", + "images/task_00097_104_386_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00097", + "split": "eval", + "split_group_id": "task_00097__episode_000104", + "recording_id": null, + "episode_id": 104, + "camera": "camera_top", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. pre-approach\nC. approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. pre-approach" + } + ], + "images": [ + "images/task_00097_107_301_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00097", + "split": "eval", + "split_group_id": "task_00097__episode_000107", + "recording_id": null, + "episode_id": 107, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. hold and carry\nC. pre-approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
hold and carry" + } + ], + "images": [ + "images/task_00097_107_461_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00097", + "split": "eval", + "split_group_id": "task_00097__episode_000107", + "recording_id": null, + "episode_id": 107, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. contact\nC. transfer\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. transfer" + } + ], + "images": [ + "images/task_00097_107_536_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00097", + "split": "eval", + "split_group_id": "task_00097__episode_000107", + "recording_id": null, + "episode_id": 107, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the top of the scene\nB. toward the bottom of the scene\nC. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00097_107_377_camera_top.jpg", + "images/task_00097_107_382_camera_top.jpg", + "images/task_00097_107_387_camera_top.jpg", + "images/task_00097_107_392_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00097", + "split": "eval", + "split_group_id": "task_00097__episode_000107", + "recording_id": null, + "episode_id": 107, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00097_118_295_camera_top.jpg", + "images/task_00097_118_298_camera_top.jpg", + "images/task_00097_118_301_camera_top.jpg", + "images/task_00097_118_304_camera_top.jpg", + "images/task_00097_118_307_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00097", + "split": "eval", + "split_group_id": "task_00097__episode_000118", + "recording_id": null, + "episode_id": 118, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00097_118_267_camera_top.jpg", + "images/task_00097_118_270_camera_top.jpg", + "images/task_00097_118_273_camera_top.jpg", + "images/task_00097_118_276_camera_top.jpg", + "images/task_00097_118_279_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00097", + "split": "eval", + "split_group_id": "task_00097__episode_000118", + "recording_id": null, + "episode_id": 118, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nA. toward the right side of the scene\nB. toward the bottom of the scene\nC. toward the top of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. toward the right side of the scene" + } + ], + "images": [ + "images/task_00097_118_200_camera_top.jpg", + "images/task_00097_118_205_camera_top.jpg", + "images/task_00097_118_210_camera_top.jpg", + "images/task_00097_118_215_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00097", + "split": "eval", + "split_group_id": "task_00097__episode_000118", + "recording_id": null, + "episode_id": 118, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_00097_118_393_camera_top.jpg", + "images/task_00097_118_342_camera_top.jpg", + "images/task_00097_118_196_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00097", + "split": "eval", + "split_group_id": "task_00097__episode_000118", + "recording_id": null, + "episode_id": 118, + "camera": "camera_top", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the left gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/task_00097_118_27_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00097", + "split": "eval", + "split_group_id": "task_00097__episode_000118", + "recording_id": null, + "episode_id": 118, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_00097_118_356_camera_top.jpg", + "images/task_00097_118_515_camera_top.jpg", + "images/task_00097_118_200_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00097", + "split": "eval", + "split_group_id": "task_00097__episode_000118", + "recording_id": null, + "episode_id": 118, + "camera": "camera_top", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. release\nC. contact\nD. 
pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. contact" + } + ], + "images": [ + "images/task_00097_118_222_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00097", + "split": "eval", + "split_group_id": "task_00097__episode_000118", + "recording_id": null, + "episode_id": 118, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00097_118_477_camera_top_t-6.jpg", + "images/task_00097_118_480_camera_top_t-3.jpg", + "images/task_00097_118_483_camera_top_t0.jpg", + "images/task_00097_118_486_camera_top_t+3.jpg", + "images/task_00097_118_489_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00097", + "split": "eval", + "split_group_id": "task_00097__episode_000118", + "recording_id": null, + "episode_id": 118, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the left gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — no contact" + } + ], + "images": [ + "images/task_00097_118_473_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00097", + "split": "eval", + "split_group_id": "task_00097__episode_000118", + "recording_id": null, + "episode_id": 118, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the left gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_00097_118_18_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00097", + "split": "eval", + "split_group_id": "task_00097__episode_000118", + "recording_id": null, + "episode_id": 118, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. transfer\nC. contact\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. release" + } + ], + "images": [ + "images/task_00098_90_507_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00098", + "split": "eval", + "split_group_id": "task_00098__episode_000090", + "recording_id": null, + "episode_id": 90, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. release\nC. transfer\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
release" + } + ], + "images": [ + "images/task_00098_94_712_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00098", + "split": "eval", + "split_group_id": "task_00098__episode_000094", + "recording_id": null, + "episode_id": 94, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00098_94_147_camera_top.jpg", + "images/task_00098_94_150_camera_top.jpg", + "images/task_00098_94_153_camera_top.jpg", + "images/task_00098_94_156_camera_top.jpg", + "images/task_00098_94_159_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00098", + "split": "eval", + "split_group_id": "task_00098__episode_000094", + "recording_id": null, + "episode_id": 94, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/task_00098_94_126_camera_top.jpg", + "images/task_00098_94_358_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00098", + "split": "eval", + "split_group_id": "task_00098__episode_000094", + "recording_id": null, + "episode_id": 94, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_00098_94_375_camera_top.jpg", + "images/task_00098_94_174_camera_top.jpg", + "images/task_00098_94_426_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00098", + "split": "eval", + "split_group_id": "task_00098__episode_000094", + "recording_id": null, + "episode_id": 94, + "camera": "camera_top", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00098_94_576_camera_top_t-6.jpg", + "images/task_00098_94_579_camera_top_t-3.jpg", + "images/task_00098_94_582_camera_top_t0.jpg", + "images/task_00098_94_585_camera_top_t+3.jpg", + "images/task_00098_94_588_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00098", + "split": "eval", + "split_group_id": "task_00098__episode_000094", + "recording_id": null, + "episode_id": 94, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_00098_94_208_camera_top.jpg", + "images/task_00098_94_322_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00098", + "split": "eval", + "split_group_id": "task_00098__episode_000094", + "recording_id": null, + "episode_id": 94, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00098_94_373_camera_top_t-6.jpg", + "images/task_00098_94_376_camera_top_t-3.jpg", + "images/task_00098_94_379_camera_top_t0.jpg", + "images/task_00098_94_382_camera_top_t+3.jpg", + "images/task_00098_94_385_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00098", + "split": "eval", + "split_group_id": "task_00098__episode_000094", + "recording_id": null, + "episode_id": 94, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00098_94_363_camera_top_t-6.jpg", + "images/task_00098_94_366_camera_top_t-3.jpg", + "images/task_00098_94_369_camera_top_t0.jpg", + "images/task_00098_94_372_camera_top_t+3.jpg", + "images/task_00098_94_375_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00098", + "split": "eval", + "split_group_id": "task_00098__episode_000094", + "recording_id": null, + "episode_id": 94, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00098_94_223_camera_top.jpg", + "images/task_00098_94_226_camera_top.jpg", + "images/task_00098_94_229_camera_top.jpg", + "images/task_00098_94_232_camera_top.jpg", + "images/task_00098_94_235_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00098", + "split": "eval", + "split_group_id": "task_00098__episode_000094", + "recording_id": null, + "episode_id": 94, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_00098_111_432_camera_top.jpg", + "images/task_00098_111_117_camera_top.jpg", + "images/task_00098_111_828_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00098", + "split": "eval", + "split_group_id": "task_00098__episode_000111", + "recording_id": null, + "episode_id": 111, + "camera": "camera_top", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00098_111_303_camera_top.jpg", + "images/task_00098_111_306_camera_top.jpg", + "images/task_00098_111_309_camera_top.jpg", + "images/task_00098_111_312_camera_top.jpg", + "images/task_00098_111_315_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00098", + "split": "eval", + "split_group_id": "task_00098__episode_000111", + "recording_id": null, + "episode_id": 111, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00098_111_665_camera_top.jpg", + "images/task_00098_111_668_camera_top.jpg", + "images/task_00098_111_671_camera_top.jpg", + "images/task_00098_111_674_camera_top.jpg", + "images/task_00098_111_677_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00098", + "split": "eval", + "split_group_id": "task_00098__episode_000111", + "recording_id": null, + "episode_id": 111, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the left side of the scene\nA. toward the bottom of the scene\nB. toward the top of the scene\nC. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
toward the left side of the scene" + } + ], + "images": [ + "images/task_00098_111_584_camera_top.jpg", + "images/task_00098_111_589_camera_top.jpg", + "images/task_00098_111_594_camera_top.jpg", + "images/task_00098_111_599_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00098", + "split": "eval", + "split_group_id": "task_00098__episode_000111", + "recording_id": null, + "episode_id": 111, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nC. toward the right side of the scene\nA. toward the left side of the scene\nB. toward the top of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. toward the right side of the scene" + } + ], + "images": [ + "images/task_00098_111_884_camera_top.jpg", + "images/task_00098_111_889_camera_top.jpg", + "images/task_00098_111_894_camera_top.jpg", + "images/task_00098_111_899_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00098", + "split": "eval", + "split_group_id": "task_00098__episode_000111", + "recording_id": null, + "episode_id": 111, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nA. toward the left side of the scene\nB. toward the bottom of the scene\nC. toward the right side of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
toward the left side of the scene" + } + ], + "images": [ + "images/task_00098_111_577_camera_top.jpg", + "images/task_00098_111_582_camera_top.jpg", + "images/task_00098_111_587_camera_top.jpg", + "images/task_00098_111_592_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00098", + "split": "eval", + "split_group_id": "task_00098__episode_000111", + "recording_id": null, + "episode_id": 111, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the left side of the scene\nA. toward the right side of the scene\nC. toward the top of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. toward the left side of the scene" + } + ], + "images": [ + "images/task_00098_111_579_camera_top.jpg", + "images/task_00098_111_584_camera_top.jpg", + "images/task_00098_111_589_camera_top.jpg", + "images/task_00098_111_594_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00098", + "split": "eval", + "split_group_id": "task_00098__episode_000111", + "recording_id": null, + "episode_id": 111, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/task_00098_111_314_camera_top.jpg", + "images/task_00098_111_121_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00098", + "split": "eval", + "split_group_id": "task_00098__episode_000111", + "recording_id": null, + "episode_id": 111, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00098_153_189_camera_top.jpg", + "images/task_00098_153_192_camera_top.jpg", + "images/task_00098_153_195_camera_top.jpg", + "images/task_00098_153_198_camera_top.jpg", + "images/task_00098_153_201_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00098", + "split": "eval", + "split_group_id": "task_00098__episode_000153", + "recording_id": null, + "episode_id": 153, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00098_153_211_camera_top.jpg", + "images/task_00098_153_214_camera_top.jpg", + "images/task_00098_153_217_camera_top.jpg", + "images/task_00098_153_220_camera_top.jpg", + "images/task_00098_153_223_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00098", + "split": "eval", + "split_group_id": "task_00098__episode_000153", + "recording_id": null, + "episode_id": 153, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00098_153_1405_camera_top_t-6.jpg", + "images/task_00098_153_1408_camera_top_t-3.jpg", + "images/task_00098_153_1411_camera_top_t0.jpg", + "images/task_00098_153_1414_camera_top_t+3.jpg", + "images/task_00098_153_1417_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00098", + "split": "eval", + "split_group_id": "task_00098__episode_000153", + "recording_id": null, + "episode_id": 153, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nC. toward the right side of the scene\nA. toward the bottom of the scene\nB. toward the top of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00098_153_854_camera_top.jpg", + "images/task_00098_153_859_camera_top.jpg", + "images/task_00098_153_864_camera_top.jpg", + "images/task_00098_153_869_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00098", + "split": "eval", + "split_group_id": "task_00098__episode_000153", + "recording_id": null, + "episode_id": 153, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. release\nC. approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. transfer" + } + ], + "images": [ + "images/task_00098_153_763_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00098", + "split": "eval", + "split_group_id": "task_00098__episode_000153", + "recording_id": null, + "episode_id": 153, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00098_153_368_camera_top.jpg", + "images/task_00098_153_371_camera_top.jpg", + "images/task_00098_153_374_camera_top.jpg", + "images/task_00098_153_377_camera_top.jpg", + "images/task_00098_153_380_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00098", + "split": "eval", + "split_group_id": "task_00098__episode_000153", + "recording_id": null, + "episode_id": 153, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00098_153_1098_camera_top_t-6.jpg", + "images/task_00098_153_1101_camera_top_t-3.jpg", + "images/task_00098_153_1104_camera_top_t0.jpg", + "images/task_00098_153_1107_camera_top_t+3.jpg", + "images/task_00098_153_1110_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00098", + "split": "eval", + "split_group_id": "task_00098__episode_000153", + "recording_id": null, + "episode_id": 153, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_00098_153_171_camera_top.jpg", + "images/task_00098_153_578_camera_top.jpg", + "images/task_00098_153_797_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00098", + "split": "eval", + "split_group_id": "task_00098__episode_000153", + "recording_id": null, + "episode_id": 153, + "camera": "camera_top", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_00098_153_794_camera_top.jpg", + "images/task_00098_153_1051_camera_top.jpg", + "images/task_00098_153_334_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00098", + "split": "eval", + "split_group_id": "task_00098__episode_000153", + "recording_id": null, + "episode_id": 153, + "camera": "camera_top", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_00098_166_467_camera_top.jpg", + "images/task_00098_166_192_camera_top.jpg", + "images/task_00098_166_1190_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00098", + "split": "eval", + "split_group_id": "task_00098__episode_000166", + "recording_id": null, + "episode_id": 166, + "camera": "camera_top", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_00098_166_158_camera_top.jpg", + "images/task_00098_166_685_camera_top.jpg", + "images/task_00098_166_401_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00098", + "split": "eval", + "split_group_id": "task_00098__episode_000166", + "recording_id": null, + "episode_id": 166, + "camera": "camera_top", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. release\nC. contact\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. contact" + } + ], + "images": [ + "images/task_00098_166_463_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00098", + "split": "eval", + "split_group_id": "task_00098__episode_000166", + "recording_id": null, + "episode_id": 166, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the right side of the scene\nA. toward the top of the scene\nC. toward the bottom of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00098_166_1264_camera_top.jpg", + "images/task_00098_166_1269_camera_top.jpg", + "images/task_00098_166_1274_camera_top.jpg", + "images/task_00098_166_1279_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00098", + "split": "eval", + "split_group_id": "task_00098__episode_000166", + "recording_id": null, + "episode_id": 166, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00098_166_493_camera_top.jpg", + "images/task_00098_166_496_camera_top.jpg", + "images/task_00098_166_499_camera_top.jpg", + "images/task_00098_166_502_camera_top.jpg", + "images/task_00098_166_505_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00098", + "split": "eval", + "split_group_id": "task_00098__episode_000166", + "recording_id": null, + "episode_id": 166, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00098_166_442_camera_top.jpg", + "images/task_00098_166_445_camera_top.jpg", + "images/task_00098_166_448_camera_top.jpg", + "images/task_00098_166_451_camera_top.jpg", + "images/task_00098_166_454_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00098", + "split": "eval", + "split_group_id": "task_00098__episode_000166", + "recording_id": null, + "episode_id": 166, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_00098_166_1043_camera_top.jpg", + "images/task_00098_166_467_camera_top.jpg", + "images/task_00098_166_169_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00098", + "split": "eval", + "split_group_id": "task_00098__episode_000166", + "recording_id": null, + "episode_id": 166, + "camera": "camera_top", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nC. toward the left side of the scene\nA. toward the right side of the scene\nB. toward the bottom of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
toward the left side of the scene" + } + ], + "images": [ + "images/task_00098_193_382_camera_top.jpg", + "images/task_00098_193_387_camera_top.jpg", + "images/task_00098_193_392_camera_top.jpg", + "images/task_00098_193_397_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00098", + "split": "eval", + "split_group_id": "task_00098__episode_000193", + "recording_id": null, + "episode_id": 193, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. only the left arm is active" + } + ], + "images": [ + "images/task_00099_0_460_camera_top.jpg", + "images/task_00099_0_463_camera_top.jpg", + "images/task_00099_0_466_camera_top.jpg", + "images/task_00099_0_469_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_00099_0_732_camera_top.jpg", + "images/task_00099_0_608_camera_top.jpg", + "images/task_00099_0_221_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. only the right arm is active" + } + ], + "images": [ + "images/task_00099_0_133_camera_top.jpg", + "images/task_00099_0_136_camera_top.jpg", + "images/task_00099_0_139_camera_top.jpg", + "images/task_00099_0_142_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. approach\nC. transfer\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
contact" + } + ], + "images": [ + "images/task_00099_0_374_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nB. toward the top of the scene\nA. toward the bottom of the scene\nC. toward the right side of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. toward the top of the scene" + } + ], + "images": [ + "images/task_00099_0_595_camera_top.jpg", + "images/task_00099_0_600_camera_top.jpg", + "images/task_00099_0_605_camera_top.jpg", + "images/task_00099_0_610_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/task_00099_0_314_camera_top.jpg", + "images/task_00099_0_515_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. contact\nC. release\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. approach" + } + ], + "images": [ + "images/task_00099_0_580_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00099_0_395_camera_top.jpg", + "images/task_00099_0_398_camera_top.jpg", + "images/task_00099_0_401_camera_top.jpg", + "images/task_00099_0_404_camera_top.jpg", + "images/task_00099_0_407_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. release\nC. contact\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. contact" + } + ], + "images": [ + "images/task_00099_0_238_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/task_00099_0_693_camera_top.jpg", + "images/task_00099_0_576_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. both arms are active" + } + ], + "images": [ + "images/task_00099_0_592_camera_top.jpg", + "images/task_00099_0_595_camera_top.jpg", + "images/task_00099_0_598_camera_top.jpg", + "images/task_00099_0_601_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. transfer\nC. release\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
contact" + } + ], + "images": [ + "images/task_00099_0_615_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. only the left arm is active" + } + ], + "images": [ + "images/task_00099_0_455_camera_top.jpg", + "images/task_00099_0_458_camera_top.jpg", + "images/task_00099_0_461_camera_top.jpg", + "images/task_00099_0_464_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00099_0_452_camera_top_t-6.jpg", + "images/task_00099_0_455_camera_top_t-3.jpg", + "images/task_00099_0_458_camera_top_t0.jpg", + "images/task_00099_0_461_camera_top_t+3.jpg", + "images/task_00099_0_464_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00099_0_589_camera_top.jpg", + "images/task_00099_0_592_camera_top.jpg", + "images/task_00099_0_595_camera_top.jpg", + "images/task_00099_0_598_camera_top.jpg", + "images/task_00099_0_601_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00099_0_594_camera_top.jpg", + "images/task_00099_0_597_camera_top.jpg", + "images/task_00099_0_600_camera_top.jpg", + "images/task_00099_0_603_camera_top.jpg", + "images/task_00099_0_606_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. only the left arm is active" + } + ], + "images": [ + "images/task_00099_0_453_camera_top.jpg", + "images/task_00099_0_456_camera_top.jpg", + "images/task_00099_0_459_camera_top.jpg", + "images/task_00099_0_462_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
only the left arm is active" + } + ], + "images": [ + "images/task_00099_5_892_camera_top.jpg", + "images/task_00099_5_895_camera_top.jpg", + "images/task_00099_5_898_camera_top.jpg", + "images/task_00099_5_901_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000005", + "recording_id": null, + "episode_id": 5, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nC. toward the top of the scene\nA. toward the bottom of the scene\nB. toward the right side of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. toward the top of the scene" + } + ], + "images": [ + "images/task_00099_5_489_camera_top.jpg", + "images/task_00099_5_494_camera_top.jpg", + "images/task_00099_5_499_camera_top.jpg", + "images/task_00099_5_504_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000005", + "recording_id": null, + "episode_id": 5, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nA. toward the right side of the scene\nB. toward the left side of the scene\nC. toward the bottom of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00099_5_710_camera_top.jpg", + "images/task_00099_5_715_camera_top.jpg", + "images/task_00099_5_720_camera_top.jpg", + "images/task_00099_5_725_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000005", + "recording_id": null, + "episode_id": 5, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. only the left arm is active" + } + ], + "images": [ + "images/task_00099_5_896_camera_top.jpg", + "images/task_00099_5_899_camera_top.jpg", + "images/task_00099_5_902_camera_top.jpg", + "images/task_00099_5_905_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000005", + "recording_id": null, + "episode_id": 5, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. release\nC. transfer\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
pre-approach" + } + ], + "images": [ + "images/task_00099_5_261_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000005", + "recording_id": null, + "episode_id": 5, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00099_5_568_camera_top.jpg", + "images/task_00099_5_571_camera_top.jpg", + "images/task_00099_5_574_camera_top.jpg", + "images/task_00099_5_577_camera_top.jpg", + "images/task_00099_5_580_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000005", + "recording_id": null, + "episode_id": 5, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the left side of the scene\nA. toward the right side of the scene\nC. toward the top of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
toward the left side of the scene" + } + ], + "images": [ + "images/task_00099_5_626_camera_top.jpg", + "images/task_00099_5_631_camera_top.jpg", + "images/task_00099_5_636_camera_top.jpg", + "images/task_00099_5_641_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000005", + "recording_id": null, + "episode_id": 5, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the left gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/task_00099_5_477_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000005", + "recording_id": null, + "episode_id": 5, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/task_00099_5_527_camera_top.jpg", + "images/task_00099_5_292_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000005", + "recording_id": null, + "episode_id": 5, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. both arms are idle" + } + ], + "images": [ + "images/task_00099_5_852_camera_top.jpg", + "images/task_00099_5_855_camera_top.jpg", + "images/task_00099_5_858_camera_top.jpg", + "images/task_00099_5_861_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000005", + "recording_id": null, + "episode_id": 5, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
only the left arm is active" + } + ], + "images": [ + "images/task_00099_5_891_camera_top.jpg", + "images/task_00099_5_894_camera_top.jpg", + "images/task_00099_5_897_camera_top.jpg", + "images/task_00099_5_900_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000005", + "recording_id": null, + "episode_id": 5, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. only the left arm is active" + } + ], + "images": [ + "images/task_00099_8_165_camera_top.jpg", + "images/task_00099_8_168_camera_top.jpg", + "images/task_00099_8_171_camera_top.jpg", + "images/task_00099_8_174_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000008", + "recording_id": null, + "episode_id": 8, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nD. toward the left side of the scene\nA. toward the top of the scene\nB. toward the right side of the scene\nC. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
toward the left side of the scene" + } + ], + "images": [ + "images/task_00099_8_699_camera_top.jpg", + "images/task_00099_8_704_camera_top.jpg", + "images/task_00099_8_709_camera_top.jpg", + "images/task_00099_8_714_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000008", + "recording_id": null, + "episode_id": 8, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_00099_8_414_camera_top.jpg", + "images/task_00099_8_478_camera_top.jpg", + "images/task_00099_8_275_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000008", + "recording_id": null, + "episode_id": 8, + "camera": "camera_top", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nB. toward the left side of the scene\nA. toward the right side of the scene\nC. toward the bottom of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
toward the left side of the scene" + } + ], + "images": [ + "images/task_00099_8_695_camera_top.jpg", + "images/task_00099_8_700_camera_top.jpg", + "images/task_00099_8_705_camera_top.jpg", + "images/task_00099_8_710_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000008", + "recording_id": null, + "episode_id": 8, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. only the left arm is active" + } + ], + "images": [ + "images/task_00099_8_163_camera_top.jpg", + "images/task_00099_8_166_camera_top.jpg", + "images/task_00099_8_169_camera_top.jpg", + "images/task_00099_8_172_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000008", + "recording_id": null, + "episode_id": 8, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
only the left arm is active" + } + ], + "images": [ + "images/task_00099_8_159_camera_top.jpg", + "images/task_00099_8_162_camera_top.jpg", + "images/task_00099_8_165_camera_top.jpg", + "images/task_00099_8_168_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000008", + "recording_id": null, + "episode_id": 8, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nA. toward the left side of the scene\nB. toward the top of the scene\nC. toward the right side of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. toward the left side of the scene" + } + ], + "images": [ + "images/task_00099_8_703_camera_top.jpg", + "images/task_00099_8_708_camera_top.jpg", + "images/task_00099_8_713_camera_top.jpg", + "images/task_00099_8_718_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000008", + "recording_id": null, + "episode_id": 8, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
only the left arm is active" + } + ], + "images": [ + "images/task_00099_8_709_camera_top.jpg", + "images/task_00099_8_712_camera_top.jpg", + "images/task_00099_8_715_camera_top.jpg", + "images/task_00099_8_718_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000008", + "recording_id": null, + "episode_id": 8, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_00099_8_338_camera_top.jpg", + "images/task_00099_8_561_camera_top.jpg", + "images/task_00099_8_286_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000008", + "recording_id": null, + "episode_id": 8, + "camera": "camera_top", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00099_10_129_camera_top.jpg", + "images/task_00099_10_132_camera_top.jpg", + "images/task_00099_10_135_camera_top.jpg", + "images/task_00099_10_138_camera_top.jpg", + "images/task_00099_10_141_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000010", + "recording_id": null, + "episode_id": 10, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. only the right arm is active" + } + ], + "images": [ + "images/task_00099_10_581_camera_top.jpg", + "images/task_00099_10_584_camera_top.jpg", + "images/task_00099_10_587_camera_top.jpg", + "images/task_00099_10_590_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000010", + "recording_id": null, + "episode_id": 10, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00099_10_334_camera_top_t-6.jpg", + "images/task_00099_10_337_camera_top_t-3.jpg", + "images/task_00099_10_340_camera_top_t0.jpg", + "images/task_00099_10_343_camera_top_t+3.jpg", + "images/task_00099_10_346_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000010", + "recording_id": null, + "episode_id": 10, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. both arms are active" + } + ], + "images": [ + "images/task_00099_10_271_camera_top.jpg", + "images/task_00099_10_274_camera_top.jpg", + "images/task_00099_10_277_camera_top.jpg", + "images/task_00099_10_280_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000010", + "recording_id": null, + "episode_id": 10, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_00099_10_150_camera_top.jpg", + "images/task_00099_10_250_camera_top.jpg", + "images/task_00099_10_480_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000010", + "recording_id": null, + "episode_id": 10, + "camera": "camera_top", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. both arms are idle" + } + ], + "images": [ + "images/task_00099_10_179_camera_top.jpg", + "images/task_00099_10_182_camera_top.jpg", + "images/task_00099_10_185_camera_top.jpg", + "images/task_00099_10_188_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000010", + "recording_id": null, + "episode_id": 10, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00099_10_491_camera_top_t-6.jpg", + "images/task_00099_10_494_camera_top_t-3.jpg", + "images/task_00099_10_497_camera_top_t0.jpg", + "images/task_00099_10_500_camera_top_t+3.jpg", + "images/task_00099_10_503_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000010", + "recording_id": null, + "episode_id": 10, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. only the left arm is active" + } + ], + "images": [ + "images/task_00099_10_91_camera_top.jpg", + "images/task_00099_10_94_camera_top.jpg", + "images/task_00099_10_97_camera_top.jpg", + "images/task_00099_10_100_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000010", + "recording_id": null, + "episode_id": 10, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
both arms are idle" + } + ], + "images": [ + "images/task_00099_10_176_camera_top.jpg", + "images/task_00099_10_179_camera_top.jpg", + "images/task_00099_10_182_camera_top.jpg", + "images/task_00099_10_185_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000010", + "recording_id": null, + "episode_id": 10, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. both arms are idle" + } + ], + "images": [ + "images/task_00099_16_523_camera_top.jpg", + "images/task_00099_16_526_camera_top.jpg", + "images/task_00099_16_529_camera_top.jpg", + "images/task_00099_16_532_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000016", + "recording_id": null, + "episode_id": 16, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — no contact" + } + ], + "images": [ + "images/task_00099_16_184_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000016", + "recording_id": null, + "episode_id": 16, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/task_00099_16_421_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000016", + "recording_id": null, + "episode_id": 16, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
only the left arm is active" + } + ], + "images": [ + "images/task_00099_16_671_camera_top.jpg", + "images/task_00099_16_674_camera_top.jpg", + "images/task_00099_16_677_camera_top.jpg", + "images/task_00099_16_680_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000016", + "recording_id": null, + "episode_id": 16, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. both arms are idle" + } + ], + "images": [ + "images/task_00099_16_172_camera_top.jpg", + "images/task_00099_16_175_camera_top.jpg", + "images/task_00099_16_178_camera_top.jpg", + "images/task_00099_16_181_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000016", + "recording_id": null, + "episode_id": 16, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
only the right arm is active" + } + ], + "images": [ + "images/task_00099_16_552_camera_top.jpg", + "images/task_00099_16_555_camera_top.jpg", + "images/task_00099_16_558_camera_top.jpg", + "images/task_00099_16_561_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000016", + "recording_id": null, + "episode_id": 16, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_00099_16_351_camera_top.jpg", + "images/task_00099_16_226_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000016", + "recording_id": null, + "episode_id": 16, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
only the right arm is active" + } + ], + "images": [ + "images/task_00099_16_549_camera_top.jpg", + "images/task_00099_16_552_camera_top.jpg", + "images/task_00099_16_555_camera_top.jpg", + "images/task_00099_16_558_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000016", + "recording_id": null, + "episode_id": 16, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. hold and carry\nC. contact\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. approach" + } + ], + "images": [ + "images/task_00099_16_224_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000016", + "recording_id": null, + "episode_id": 16, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00099_16_419_camera_top.jpg", + "images/task_00099_16_422_camera_top.jpg", + "images/task_00099_16_425_camera_top.jpg", + "images/task_00099_16_428_camera_top.jpg", + "images/task_00099_16_431_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000016", + "recording_id": null, + "episode_id": 16, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00099_19_452_camera_top_t-6.jpg", + "images/task_00099_19_458_camera_top_t-3.jpg", + "images/task_00099_19_458_camera_top_t0.jpg", + "images/task_00099_19_461_camera_top_t+3.jpg", + "images/task_00099_19_464_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000019", + "recording_id": null, + "episode_id": 19, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. pre-approach\nC. contact\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
hold and carry" + } + ], + "images": [ + "images/task_00099_19_254_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000019", + "recording_id": null, + "episode_id": 19, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. both arms are idle" + } + ], + "images": [ + "images/task_00099_19_509_camera_top.jpg", + "images/task_00099_19_512_camera_top.jpg", + "images/task_00099_19_515_camera_top.jpg", + "images/task_00099_19_518_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000019", + "recording_id": null, + "episode_id": 19, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. transfer\nC. approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
hold and carry" + } + ], + "images": [ + "images/task_00099_19_398_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000019", + "recording_id": null, + "episode_id": 19, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the left side of the scene\nA. toward the bottom of the scene\nB. toward the top of the scene\nC. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. toward the left side of the scene" + } + ], + "images": [ + "images/task_00099_19_349_camera_top.jpg", + "images/task_00099_19_354_camera_top.jpg", + "images/task_00099_19_359_camera_top.jpg", + "images/task_00099_19_364_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000019", + "recording_id": null, + "episode_id": 19, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00099_19_311_camera_top_t-6.jpg", + "images/task_00099_19_314_camera_top_t-3.jpg", + "images/task_00099_19_317_camera_top_t0.jpg", + "images/task_00099_19_320_camera_top_t+3.jpg", + "images/task_00099_19_323_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000019", + "recording_id": null, + "episode_id": 19, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. only the left arm is active" + } + ], + "images": [ + "images/task_00099_19_555_camera_top.jpg", + "images/task_00099_19_558_camera_top.jpg", + "images/task_00099_19_561_camera_top.jpg", + "images/task_00099_19_564_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000019", + "recording_id": null, + "episode_id": 19, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00099_19_357_camera_top.jpg", + "images/task_00099_19_360_camera_top.jpg", + "images/task_00099_19_363_camera_top.jpg", + "images/task_00099_19_366_camera_top.jpg", + "images/task_00099_19_369_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000019", + "recording_id": null, + "episode_id": 19, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. only the left arm is active" + } + ], + "images": [ + "images/task_00099_19_559_camera_top.jpg", + "images/task_00099_19_562_camera_top.jpg", + "images/task_00099_19_565_camera_top.jpg", + "images/task_00099_19_568_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000019", + "recording_id": null, + "episode_id": 19, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
both arms are idle" + } + ], + "images": [ + "images/task_00099_19_303_camera_top.jpg", + "images/task_00099_19_306_camera_top.jpg", + "images/task_00099_19_309_camera_top.jpg", + "images/task_00099_19_312_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000019", + "recording_id": null, + "episode_id": 19, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. transfer\nC. approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. transfer" + } + ], + "images": [ + "images/task_00099_19_199_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000019", + "recording_id": null, + "episode_id": 19, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/task_00099_19_384_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000019", + "recording_id": null, + "episode_id": 19, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the left gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/task_00099_19_461_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000019", + "recording_id": null, + "episode_id": 19, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00099_24_247_camera_top_t-6.jpg", + "images/task_00099_24_250_camera_top_t-3.jpg", + "images/task_00099_24_253_camera_top_t0.jpg", + "images/task_00099_24_256_camera_top_t+3.jpg", + "images/task_00099_24_259_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000024", + "recording_id": null, + "episode_id": 24, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
both arms are idle" + } + ], + "images": [ + "images/task_00099_24_268_camera_top.jpg", + "images/task_00099_24_271_camera_top.jpg", + "images/task_00099_24_274_camera_top.jpg", + "images/task_00099_24_277_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000024", + "recording_id": null, + "episode_id": 24, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. only the left arm is active" + } + ], + "images": [ + "images/task_00099_24_674_camera_top.jpg", + "images/task_00099_24_677_camera_top.jpg", + "images/task_00099_24_680_camera_top.jpg", + "images/task_00099_24_683_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000024", + "recording_id": null, + "episode_id": 24, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_00099_24_675_camera_top.jpg", + "images/task_00099_24_578_camera_top.jpg", + "images/task_00099_24_356_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000024", + "recording_id": null, + "episode_id": 24, + "camera": "camera_top", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_00099_24_611_camera_top.jpg", + "images/task_00099_24_504_camera_top.jpg", + "images/task_00099_24_198_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000024", + "recording_id": null, + "episode_id": 24, + "camera": "camera_top", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00099_24_647_camera_top.jpg", + "images/task_00099_24_650_camera_top.jpg", + "images/task_00099_24_653_camera_top.jpg", + "images/task_00099_24_656_camera_top.jpg", + "images/task_00099_24_659_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000024", + "recording_id": null, + "episode_id": 24, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_00099_24_396_camera_top.jpg", + "images/task_00099_24_199_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000024", + "recording_id": null, + "episode_id": 24, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
both arms are idle" + } + ], + "images": [ + "images/task_00099_24_524_camera_top.jpg", + "images/task_00099_24_527_camera_top.jpg", + "images/task_00099_24_530_camera_top.jpg", + "images/task_00099_24_533_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000024", + "recording_id": null, + "episode_id": 24, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. contact\nC. approach\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. contact" + } + ], + "images": [ + "images/task_00099_24_678_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000024", + "recording_id": null, + "episode_id": 24, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nA. toward the right side of the scene\nB. toward the top of the scene\nC. toward the bottom of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00099_24_547_camera_top.jpg", + "images/task_00099_24_552_camera_top.jpg", + "images/task_00099_24_557_camera_top.jpg", + "images/task_00099_24_562_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000024", + "recording_id": null, + "episode_id": 24, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. transfer\nC. release\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. contact" + } + ], + "images": [ + "images/task_00099_24_380_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000024", + "recording_id": null, + "episode_id": 24, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
both arms are idle" + } + ], + "images": [ + "images/task_00099_24_383_camera_top.jpg", + "images/task_00099_24_386_camera_top.jpg", + "images/task_00099_24_389_camera_top.jpg", + "images/task_00099_24_392_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000024", + "recording_id": null, + "episode_id": 24, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. approach\nC. contact\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. transfer" + } + ], + "images": [ + "images/task_00099_24_328_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000024", + "recording_id": null, + "episode_id": 24, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nA. toward the left side of the scene\nB. toward the top of the scene\nC. toward the bottom of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
toward the left side of the scene" + } + ], + "images": [ + "images/task_00099_35_427_camera_top.jpg", + "images/task_00099_35_432_camera_top.jpg", + "images/task_00099_35_437_camera_top.jpg", + "images/task_00099_35_442_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000035", + "recording_id": null, + "episode_id": 35, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. contact\nC. transfer\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. transfer" + } + ], + "images": [ + "images/task_00099_35_446_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000035", + "recording_id": null, + "episode_id": 35, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. release\nC. pre-approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. release" + } + ], + "images": [ + "images/task_00099_35_536_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000035", + "recording_id": null, + "episode_id": 35, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_00099_35_335_camera_top.jpg", + "images/task_00099_35_696_camera_top.jpg", + "images/task_00099_35_372_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000035", + "recording_id": null, + "episode_id": 35, + "camera": "camera_top", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. both arms are idle" + } + ], + "images": [ + "images/task_00099_35_266_camera_top.jpg", + "images/task_00099_35_269_camera_top.jpg", + "images/task_00099_35_272_camera_top.jpg", + "images/task_00099_35_275_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000035", + "recording_id": null, + "episode_id": 35, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/task_00099_35_460_camera_top.jpg", + "images/task_00099_35_332_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000035", + "recording_id": null, + "episode_id": 35, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00099_35_458_camera_top.jpg", + "images/task_00099_35_461_camera_top.jpg", + "images/task_00099_35_464_camera_top.jpg", + "images/task_00099_35_467_camera_top.jpg", + "images/task_00099_35_470_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000035", + "recording_id": null, + "episode_id": 35, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. pre-approach\nC. approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
transfer" + } + ], + "images": [ + "images/task_00099_35_478_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000035", + "recording_id": null, + "episode_id": 35, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. only the right arm is active" + } + ], + "images": [ + "images/task_00099_35_401_camera_top.jpg", + "images/task_00099_35_404_camera_top.jpg", + "images/task_00099_35_407_camera_top.jpg", + "images/task_00099_35_410_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000035", + "recording_id": null, + "episode_id": 35, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
both arms are idle" + } + ], + "images": [ + "images/task_00099_35_264_camera_top.jpg", + "images/task_00099_35_267_camera_top.jpg", + "images/task_00099_35_270_camera_top.jpg", + "images/task_00099_35_273_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000035", + "recording_id": null, + "episode_id": 35, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00099_35_484_camera_top_t-6.jpg", + "images/task_00099_35_487_camera_top_t-3.jpg", + "images/task_00099_35_490_camera_top_t0.jpg", + "images/task_00099_35_493_camera_top_t+3.jpg", + "images/task_00099_35_496_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000035", + "recording_id": null, + "episode_id": 35, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nA. toward the right side of the scene\nB. toward the top of the scene\nC. toward the left side of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00099_35_501_camera_top.jpg", + "images/task_00099_35_506_camera_top.jpg", + "images/task_00099_35_511_camera_top.jpg", + "images/task_00099_35_516_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000035", + "recording_id": null, + "episode_id": 35, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. only the right arm is active" + } + ], + "images": [ + "images/task_00099_35_502_camera_top.jpg", + "images/task_00099_35_505_camera_top.jpg", + "images/task_00099_35_508_camera_top.jpg", + "images/task_00099_35_511_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000035", + "recording_id": null, + "episode_id": 35, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00099_38_179_camera_top.jpg", + "images/task_00099_38_182_camera_top.jpg", + "images/task_00099_38_185_camera_top.jpg", + "images/task_00099_38_188_camera_top.jpg", + "images/task_00099_38_191_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000038", + "recording_id": null, + "episode_id": 38, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. both arms are idle" + } + ], + "images": [ + "images/task_00099_38_301_camera_top.jpg", + "images/task_00099_38_304_camera_top.jpg", + "images/task_00099_38_307_camera_top.jpg", + "images/task_00099_38_310_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000038", + "recording_id": null, + "episode_id": 38, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
only the right arm is active" + } + ], + "images": [ + "images/task_00099_38_145_camera_top.jpg", + "images/task_00099_38_148_camera_top.jpg", + "images/task_00099_38_151_camera_top.jpg", + "images/task_00099_38_154_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000038", + "recording_id": null, + "episode_id": 38, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_00099_38_172_camera_top.jpg", + "images/task_00099_38_383_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000038", + "recording_id": null, + "episode_id": 38, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00099_38_60_camera_top.jpg", + "images/task_00099_38_63_camera_top.jpg", + "images/task_00099_38_66_camera_top.jpg", + "images/task_00099_38_69_camera_top.jpg", + "images/task_00099_38_72_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000038", + "recording_id": null, + "episode_id": 38, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nB. toward the top of the scene\nA. toward the right side of the scene\nC. toward the bottom of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. toward the top of the scene" + } + ], + "images": [ + "images/task_00099_38_759_camera_top.jpg", + "images/task_00099_38_764_camera_top.jpg", + "images/task_00099_38_769_camera_top.jpg", + "images/task_00099_38_774_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000038", + "recording_id": null, + "episode_id": 38, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
both arms are active" + } + ], + "images": [ + "images/task_00099_38_616_camera_top.jpg", + "images/task_00099_38_619_camera_top.jpg", + "images/task_00099_38_622_camera_top.jpg", + "images/task_00099_38_625_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000038", + "recording_id": null, + "episode_id": 38, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. only the left arm is active" + } + ], + "images": [ + "images/task_00099_38_480_camera_top.jpg", + "images/task_00099_38_483_camera_top.jpg", + "images/task_00099_38_486_camera_top.jpg", + "images/task_00099_38_489_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000038", + "recording_id": null, + "episode_id": 38, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the left gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — contact established" + } + ], + "images": [ + "images/task_00099_38_624_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000038", + "recording_id": null, + "episode_id": 38, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. only the left arm is active" + } + ], + "images": [ + "images/task_00099_41_544_camera_top.jpg", + "images/task_00099_41_547_camera_top.jpg", + "images/task_00099_41_550_camera_top.jpg", + "images/task_00099_41_553_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000041", + "recording_id": null, + "episode_id": 41, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_00099_41_491_camera_top.jpg", + "images/task_00099_41_533_camera_top.jpg", + "images/task_00099_41_1129_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000041", + "recording_id": null, + "episode_id": 41, + "camera": "camera_top", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00099_41_853_camera_top.jpg", + "images/task_00099_41_856_camera_top.jpg", + "images/task_00099_41_859_camera_top.jpg", + "images/task_00099_41_862_camera_top.jpg", + "images/task_00099_41_865_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000041", + "recording_id": null, + "episode_id": 41, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the left side of the scene\nB. toward the bottom of the scene\nC. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00099_41_1047_camera_top.jpg", + "images/task_00099_41_1052_camera_top.jpg", + "images/task_00099_41_1057_camera_top.jpg", + "images/task_00099_41_1062_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000041", + "recording_id": null, + "episode_id": 41, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nC. toward the right side of the scene\nA. toward the top of the scene\nB. toward the left side of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. toward the right side of the scene" + } + ], + "images": [ + "images/task_00099_41_828_camera_top.jpg", + "images/task_00099_41_833_camera_top.jpg", + "images/task_00099_41_838_camera_top.jpg", + "images/task_00099_41_843_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000041", + "recording_id": null, + "episode_id": 41, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00099_41_637_camera_top_t-6.jpg", + "images/task_00099_41_640_camera_top_t-3.jpg", + "images/task_00099_41_643_camera_top_t0.jpg", + "images/task_00099_41_646_camera_top_t+3.jpg", + "images/task_00099_41_649_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000041", + "recording_id": null, + "episode_id": 41, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. both arms are idle" + } + ], + "images": [ + "images/task_00099_41_369_camera_top.jpg", + "images/task_00099_41_372_camera_top.jpg", + "images/task_00099_41_375_camera_top.jpg", + "images/task_00099_41_378_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000041", + "recording_id": null, + "episode_id": 41, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/task_00099_41_830_camera_top.jpg", + "images/task_00099_41_630_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000041", + "recording_id": null, + "episode_id": 41, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the right side of the scene\nA. toward the bottom of the scene\nC. toward the left side of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. toward the right side of the scene" + } + ], + "images": [ + "images/task_00099_41_830_camera_top.jpg", + "images/task_00099_41_835_camera_top.jpg", + "images/task_00099_41_840_camera_top.jpg", + "images/task_00099_41_845_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000041", + "recording_id": null, + "episode_id": 41, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
both arms are active" + } + ], + "images": [ + "images/task_00099_41_840_camera_top.jpg", + "images/task_00099_41_843_camera_top.jpg", + "images/task_00099_41_846_camera_top.jpg", + "images/task_00099_41_849_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000041", + "recording_id": null, + "episode_id": 41, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_00099_41_694_camera_top.jpg", + "images/task_00099_41_1143_camera_top.jpg", + "images/task_00099_41_802_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000041", + "recording_id": null, + "episode_id": 41, + "camera": "camera_top", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00099_43_701_camera_top_t-6.jpg", + "images/task_00099_43_704_camera_top_t-3.jpg", + "images/task_00099_43_707_camera_top_t0.jpg", + "images/task_00099_43_710_camera_top_t+3.jpg", + "images/task_00099_43_713_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000043", + "recording_id": null, + "episode_id": 43, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00099_43_604_camera_top_t-6.jpg", + "images/task_00099_43_607_camera_top_t-3.jpg", + "images/task_00099_43_610_camera_top_t0.jpg", + "images/task_00099_43_613_camera_top_t+3.jpg", + "images/task_00099_43_616_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000043", + "recording_id": null, + "episode_id": 43, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
both arms are idle" + } + ], + "images": [ + "images/task_00099_43_473_camera_top.jpg", + "images/task_00099_43_476_camera_top.jpg", + "images/task_00099_43_479_camera_top.jpg", + "images/task_00099_43_482_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000043", + "recording_id": null, + "episode_id": 43, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. only the right arm is active" + } + ], + "images": [ + "images/task_00099_43_559_camera_top.jpg", + "images/task_00099_43_562_camera_top.jpg", + "images/task_00099_43_565_camera_top.jpg", + "images/task_00099_43_568_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000043", + "recording_id": null, + "episode_id": 43, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the left gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — no contact" + } + ], + "images": [ + "images/task_00099_43_276_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000043", + "recording_id": null, + "episode_id": 43, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00099_43_484_camera_top_t-6.jpg", + "images/task_00099_43_487_camera_top_t-3.jpg", + "images/task_00099_43_490_camera_top_t0.jpg", + "images/task_00099_43_493_camera_top_t+3.jpg", + "images/task_00099_43_496_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000043", + "recording_id": null, + "episode_id": 43, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
both arms are active" + } + ], + "images": [ + "images/task_00099_43_412_camera_top.jpg", + "images/task_00099_43_415_camera_top.jpg", + "images/task_00099_43_418_camera_top.jpg", + "images/task_00099_43_421_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000043", + "recording_id": null, + "episode_id": 43, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. only the left arm is active" + } + ], + "images": [ + "images/task_00099_43_518_camera_top.jpg", + "images/task_00099_43_521_camera_top.jpg", + "images/task_00099_43_524_camera_top.jpg", + "images/task_00099_43_527_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000043", + "recording_id": null, + "episode_id": 43, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. pre-approach\nC. approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
release" + } + ], + "images": [ + "images/task_00099_43_895_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000043", + "recording_id": null, + "episode_id": 43, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. only the left arm is active" + } + ], + "images": [ + "images/task_00099_43_521_camera_top.jpg", + "images/task_00099_43_524_camera_top.jpg", + "images/task_00099_43_527_camera_top.jpg", + "images/task_00099_43_530_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000043", + "recording_id": null, + "episode_id": 43, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/task_00099_43_435_camera_top.jpg", + "images/task_00099_43_576_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000043", + "recording_id": null, + "episode_id": 43, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00099_43_310_camera_top.jpg", + "images/task_00099_43_313_camera_top.jpg", + "images/task_00099_43_316_camera_top.jpg", + "images/task_00099_43_319_camera_top.jpg", + "images/task_00099_43_322_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000043", + "recording_id": null, + "episode_id": 43, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_00099_43_294_camera_top.jpg", + "images/task_00099_43_447_camera_top.jpg", + "images/task_00099_43_251_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000043", + "recording_id": null, + "episode_id": 43, + "camera": "camera_top", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_00099_46_390_camera_top.jpg", + "images/task_00099_46_302_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000046", + "recording_id": null, + "episode_id": 46, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nB. toward the top of the scene\nA. toward the right side of the scene\nC. toward the bottom of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
toward the top of the scene" + } + ], + "images": [ + "images/task_00099_46_525_camera_top.jpg", + "images/task_00099_46_530_camera_top.jpg", + "images/task_00099_46_535_camera_top.jpg", + "images/task_00099_46_540_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000046", + "recording_id": null, + "episode_id": 46, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_00099_46_129_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000046", + "recording_id": null, + "episode_id": 46, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. pre-approach\nC. approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. approach" + } + ], + "images": [ + "images/task_00099_46_406_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000046", + "recording_id": null, + "episode_id": 46, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. 
Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00099_46_536_camera_top_t-6.jpg", + "images/task_00099_46_539_camera_top_t-3.jpg", + "images/task_00099_46_542_camera_top_t0.jpg", + "images/task_00099_46_545_camera_top_t+3.jpg", + "images/task_00099_46_548_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000046", + "recording_id": null, + "episode_id": 46, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. both arms are active" + } + ], + "images": [ + "images/task_00099_46_435_camera_top.jpg", + "images/task_00099_46_438_camera_top.jpg", + "images/task_00099_46_441_camera_top.jpg", + "images/task_00099_46_444_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000046", + "recording_id": null, + "episode_id": 46, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
both arms are idle" + } + ], + "images": [ + "images/task_00099_46_572_camera_top.jpg", + "images/task_00099_46_575_camera_top.jpg", + "images/task_00099_46_578_camera_top.jpg", + "images/task_00099_46_581_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000046", + "recording_id": null, + "episode_id": 46, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00099_46_436_camera_top.jpg", + "images/task_00099_46_439_camera_top.jpg", + "images/task_00099_46_442_camera_top.jpg", + "images/task_00099_46_445_camera_top.jpg", + "images/task_00099_46_448_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000046", + "recording_id": null, + "episode_id": 46, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_00099_46_285_camera_top.jpg", + "images/task_00099_46_393_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000046", + "recording_id": null, + "episode_id": 46, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00099_46_720_camera_top_t-6.jpg", + "images/task_00099_46_723_camera_top_t-3.jpg", + "images/task_00099_46_726_camera_top_t0.jpg", + "images/task_00099_46_729_camera_top_t+3.jpg", + "images/task_00099_46_732_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000046", + "recording_id": null, + "episode_id": 46, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00099_46_338_camera_top.jpg", + "images/task_00099_46_341_camera_top.jpg", + "images/task_00099_46_344_camera_top.jpg", + "images/task_00099_46_347_camera_top.jpg", + "images/task_00099_46_350_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000046", + "recording_id": null, + "episode_id": 46, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00099_46_480_camera_top.jpg", + "images/task_00099_46_483_camera_top.jpg", + "images/task_00099_46_486_camera_top.jpg", + "images/task_00099_46_489_camera_top.jpg", + "images/task_00099_46_492_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000046", + "recording_id": null, + "episode_id": 46, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
only the left arm is active" + } + ], + "images": [ + "images/task_00099_51_478_camera_top.jpg", + "images/task_00099_51_481_camera_top.jpg", + "images/task_00099_51_484_camera_top.jpg", + "images/task_00099_51_487_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000051", + "recording_id": null, + "episode_id": 51, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. only the right arm is active" + } + ], + "images": [ + "images/task_00099_51_171_camera_top.jpg", + "images/task_00099_51_174_camera_top.jpg", + "images/task_00099_51_177_camera_top.jpg", + "images/task_00099_51_180_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000051", + "recording_id": null, + "episode_id": 51, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
both arms are active" + } + ], + "images": [ + "images/task_00099_51_785_camera_top.jpg", + "images/task_00099_51_788_camera_top.jpg", + "images/task_00099_51_791_camera_top.jpg", + "images/task_00099_51_794_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000051", + "recording_id": null, + "episode_id": 51, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. both arms are active" + } + ], + "images": [ + "images/task_00099_51_784_camera_top.jpg", + "images/task_00099_51_787_camera_top.jpg", + "images/task_00099_51_790_camera_top.jpg", + "images/task_00099_51_793_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000051", + "recording_id": null, + "episode_id": 51, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the left gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — no contact" + } + ], + "images": [ + "images/task_00099_51_590_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000051", + "recording_id": null, + "episode_id": 51, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nC. toward the top of the scene\nA. toward the bottom of the scene\nB. toward the right side of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. toward the top of the scene" + } + ], + "images": [ + "images/task_00099_51_727_camera_top.jpg", + "images/task_00099_51_732_camera_top.jpg", + "images/task_00099_51_737_camera_top.jpg", + "images/task_00099_51_742_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000051", + "recording_id": null, + "episode_id": 51, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/task_00099_51_535_camera_top.jpg", + "images/task_00099_51_371_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000051", + "recording_id": null, + "episode_id": 51, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. contact\nC. approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. release" + } + ], + "images": [ + "images/task_00099_51_783_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000051", + "recording_id": null, + "episode_id": 51, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00099_51_64_camera_top.jpg", + "images/task_00099_51_67_camera_top.jpg", + "images/task_00099_51_70_camera_top.jpg", + "images/task_00099_51_73_camera_top.jpg", + "images/task_00099_51_76_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000051", + "recording_id": null, + "episode_id": 51, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_00099_51_666_camera_top.jpg", + "images/task_00099_51_311_camera_top.jpg", + "images/task_00099_51_876_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000051", + "recording_id": null, + "episode_id": 51, + "camera": "camera_top", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. approach\nC. hold and carry\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
transfer" + } + ], + "images": [ + "images/task_00099_51_602_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000051", + "recording_id": null, + "episode_id": 51, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. both arms are active" + } + ], + "images": [ + "images/task_00099_51_787_camera_top.jpg", + "images/task_00099_51_790_camera_top.jpg", + "images/task_00099_51_793_camera_top.jpg", + "images/task_00099_51_796_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000051", + "recording_id": null, + "episode_id": 51, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00099_134_689_camera_top.jpg", + "images/task_00099_134_692_camera_top.jpg", + "images/task_00099_134_695_camera_top.jpg", + "images/task_00099_134_698_camera_top.jpg", + "images/task_00099_134_701_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000134", + "recording_id": null, + "episode_id": 134, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00099_134_668_camera_top_t-6.jpg", + "images/task_00099_134_671_camera_top_t-3.jpg", + "images/task_00099_134_674_camera_top_t0.jpg", + "images/task_00099_134_677_camera_top_t+3.jpg", + "images/task_00099_134_680_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000134", + "recording_id": null, + "episode_id": 134, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_00099_134_494_camera_top.jpg", + "images/task_00099_134_292_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000134", + "recording_id": null, + "episode_id": 134, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. both arms are idle" + } + ], + "images": [ + "images/task_00099_134_720_camera_top.jpg", + "images/task_00099_134_723_camera_top.jpg", + "images/task_00099_134_726_camera_top.jpg", + "images/task_00099_134_729_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000134", + "recording_id": null, + "episode_id": 134, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_00099_134_586_camera_top.jpg", + "images/task_00099_134_952_camera_top.jpg", + "images/task_00099_134_252_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000134", + "recording_id": null, + "episode_id": 134, + "camera": "camera_top", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. release\nC. hold and carry\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. transfer" + } + ], + "images": [ + "images/task_00099_134_714_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000134", + "recording_id": null, + "episode_id": 134, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
both arms are idle" + } + ], + "images": [ + "images/task_00099_134_800_camera_top.jpg", + "images/task_00099_134_803_camera_top.jpg", + "images/task_00099_134_806_camera_top.jpg", + "images/task_00099_134_809_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000134", + "recording_id": null, + "episode_id": 134, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. both arms are idle" + } + ], + "images": [ + "images/task_00099_134_611_camera_top.jpg", + "images/task_00099_134_614_camera_top.jpg", + "images/task_00099_134_617_camera_top.jpg", + "images/task_00099_134_620_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000134", + "recording_id": null, + "episode_id": 134, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_00099_134_614_camera_top.jpg", + "images/task_00099_134_775_camera_top.jpg", + "images/task_00099_134_218_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00099", + "split": "eval", + "split_group_id": "task_00099__episode_000134", + "recording_id": null, + "episode_id": 134, + "camera": "camera_top", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_00100_65_416_camera_top.jpg", + "images/task_00100_65_279_camera_top.jpg", + "images/task_00100_65_660_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00100_65_629_camera_top_t-6.jpg", + "images/task_00100_65_632_camera_top_t-3.jpg", + "images/task_00100_65_635_camera_top_t0.jpg", + "images/task_00100_65_638_camera_top_t+3.jpg", + "images/task_00100_65_641_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_00100_65_204_camera_top.jpg", + "images/task_00100_65_315_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — no contact" + } + ], + "images": [ + "images/task_00100_65_309_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00100_65_656_camera_top.jpg", + "images/task_00100_65_659_camera_top.jpg", + "images/task_00100_65_662_camera_top.jpg", + "images/task_00100_65_665_camera_top.jpg", + "images/task_00100_65_668_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00100_65_202_camera_top.jpg", + "images/task_00100_65_205_camera_top.jpg", + "images/task_00100_65_208_camera_top.jpg", + "images/task_00100_65_211_camera_top.jpg", + "images/task_00100_65_214_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_00100_65_489_camera_top.jpg", + "images/task_00100_65_666_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — contact established" + } + ], + "images": [ + "images/task_00100_65_182_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00100_92_313_camera_top.jpg", + "images/task_00100_92_316_camera_top.jpg", + "images/task_00100_92_319_camera_top.jpg", + "images/task_00100_92_322_camera_top.jpg", + "images/task_00100_92_325_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000092", + "recording_id": null, + "episode_id": 92, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00100_92_614_camera_top.jpg", + "images/task_00100_92_617_camera_top.jpg", + "images/task_00100_92_620_camera_top.jpg", + "images/task_00100_92_623_camera_top.jpg", + "images/task_00100_92_626_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000092", + "recording_id": null, + "episode_id": 92, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nC. toward the left side of the scene\nA. toward the top of the scene\nB. toward the bottom of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. toward the left side of the scene" + } + ], + "images": [ + "images/task_00100_92_659_camera_top.jpg", + "images/task_00100_92_664_camera_top.jpg", + "images/task_00100_92_669_camera_top.jpg", + "images/task_00100_92_674_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000092", + "recording_id": null, + "episode_id": 92, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the left side of the scene\nA. toward the right side of the scene\nB. toward the top of the scene\nC. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
toward the left side of the scene" + } + ], + "images": [ + "images/task_00100_92_634_camera_top.jpg", + "images/task_00100_92_639_camera_top.jpg", + "images/task_00100_92_644_camera_top.jpg", + "images/task_00100_92_649_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000092", + "recording_id": null, + "episode_id": 92, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00100_92_403_camera_top.jpg", + "images/task_00100_92_406_camera_top.jpg", + "images/task_00100_92_409_camera_top.jpg", + "images/task_00100_92_412_camera_top.jpg", + "images/task_00100_92_415_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000092", + "recording_id": null, + "episode_id": 92, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00100_92_873_camera_top_t-6.jpg", + "images/task_00100_92_876_camera_top_t-3.jpg", + "images/task_00100_92_879_camera_top_t0.jpg", + "images/task_00100_92_882_camera_top_t+3.jpg", + "images/task_00100_92_885_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000092", + "recording_id": null, + "episode_id": 92, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_00100_92_635_camera_top.jpg", + "images/task_00100_92_515_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000092", + "recording_id": null, + "episode_id": 92, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/task_00100_92_293_camera_top.jpg", + "images/task_00100_92_189_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000092", + "recording_id": null, + "episode_id": 92, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. hold and carry\nC. pre-approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. pre-approach" + } + ], + "images": [ + "images/task_00100_92_186_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000092", + "recording_id": null, + "episode_id": 92, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_00100_92_508_camera_top.jpg", + "images/task_00100_92_543_camera_top.jpg", + "images/task_00100_92_218_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000092", + "recording_id": null, + "episode_id": 92, + "camera": "camera_top", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nA. toward the right side of the scene\nB. toward the left side of the scene\nC. toward the bottom of the scene\nD. 
toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. toward the right side of the scene" + } + ], + "images": [ + "images/task_00100_92_763_camera_top.jpg", + "images/task_00100_92_768_camera_top.jpg", + "images/task_00100_92_773_camera_top.jpg", + "images/task_00100_92_778_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000092", + "recording_id": null, + "episode_id": 92, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_00100_92_739_camera_top.jpg", + "images/task_00100_92_516_camera_top.jpg", + "images/task_00100_92_477_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000092", + "recording_id": null, + "episode_id": 92, + "camera": "camera_top", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00100_92_848_camera_top_t-6.jpg", + "images/task_00100_92_851_camera_top_t-3.jpg", + "images/task_00100_92_854_camera_top_t0.jpg", + "images/task_00100_92_857_camera_top_t+3.jpg", + "images/task_00100_92_860_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000092", + "recording_id": null, + "episode_id": 92, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nC. toward the right side of the scene\nA. toward the left side of the scene\nB. toward the top of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. toward the right side of the scene" + } + ], + "images": [ + "images/task_00100_113_702_camera_top.jpg", + "images/task_00100_113_707_camera_top.jpg", + "images/task_00100_113_712_camera_top.jpg", + "images/task_00100_113_717_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000113", + "recording_id": null, + "episode_id": 113, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. contact\nC. approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
transfer" + } + ], + "images": [ + "images/task_00100_113_675_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000113", + "recording_id": null, + "episode_id": 113, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_00100_113_305_camera_top.jpg", + "images/task_00100_113_674_camera_top.jpg", + "images/task_00100_113_130_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000113", + "recording_id": null, + "episode_id": 113, + "camera": "camera_top", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00100_113_786_camera_top_t-6.jpg", + "images/task_00100_113_789_camera_top_t-3.jpg", + "images/task_00100_113_792_camera_top_t0.jpg", + "images/task_00100_113_795_camera_top_t+3.jpg", + "images/task_00100_113_798_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000113", + "recording_id": null, + "episode_id": 113, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_00100_113_139_camera_top.jpg", + "images/task_00100_113_252_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000113", + "recording_id": null, + "episode_id": 113, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00100_113_735_camera_top.jpg", + "images/task_00100_113_738_camera_top.jpg", + "images/task_00100_113_741_camera_top.jpg", + "images/task_00100_113_744_camera_top.jpg", + "images/task_00100_113_747_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000113", + "recording_id": null, + "episode_id": 113, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. release\nC. contact\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. approach" + } + ], + "images": [ + "images/task_00100_113_510_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000113", + "recording_id": null, + "episode_id": 113, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_00100_113_606_camera_top.jpg", + "images/task_00100_113_161_camera_top.jpg", + "images/task_00100_113_305_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000113", + "recording_id": null, + "episode_id": 113, + "camera": "camera_top", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. approach\nC. pre-approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. pre-approach" + } + ], + "images": [ + "images/task_00100_113_81_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000113", + "recording_id": null, + "episode_id": 113, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/task_00100_113_484_camera_top.jpg", + "images/task_00100_113_421_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000113", + "recording_id": null, + "episode_id": 113, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_00100_115_725_camera_top.jpg", + "images/task_00100_115_842_camera_top.jpg", + "images/task_00100_115_577_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000115", + "recording_id": null, + "episode_id": 115, + "camera": "camera_top", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00100_115_457_camera_top.jpg", + "images/task_00100_115_460_camera_top.jpg", + "images/task_00100_115_463_camera_top.jpg", + "images/task_00100_115_466_camera_top.jpg", + "images/task_00100_115_469_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000115", + "recording_id": null, + "episode_id": 115, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00100_115_726_camera_top_t-6.jpg", + "images/task_00100_115_729_camera_top_t-3.jpg", + "images/task_00100_115_732_camera_top_t0.jpg", + "images/task_00100_115_735_camera_top_t+3.jpg", + "images/task_00100_115_738_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000115", + "recording_id": null, + "episode_id": 115, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00100_115_555_camera_top.jpg", + "images/task_00100_115_558_camera_top.jpg", + "images/task_00100_115_561_camera_top.jpg", + "images/task_00100_115_564_camera_top.jpg", + "images/task_00100_115_567_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000115", + "recording_id": null, + "episode_id": 115, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. contact\nC. hold and carry\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. hold and carry" + } + ], + "images": [ + "images/task_00100_115_308_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000115", + "recording_id": null, + "episode_id": 115, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. hold and carry\nC. approach\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
approach" + } + ], + "images": [ + "images/task_00100_115_102_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000115", + "recording_id": null, + "episode_id": 115, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. pre-approach\nC. hold and carry\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. hold and carry" + } + ], + "images": [ + "images/task_00100_115_316_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000115", + "recording_id": null, + "episode_id": 115, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/task_00100_115_543_camera_top.jpg", + "images/task_00100_115_357_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000115", + "recording_id": null, + "episode_id": 115, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nC. toward the right side of the scene\nA. toward the top of the scene\nB. toward the bottom of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. toward the right side of the scene" + } + ], + "images": [ + "images/task_00100_115_786_camera_top.jpg", + "images/task_00100_115_791_camera_top.jpg", + "images/task_00100_115_796_camera_top.jpg", + "images/task_00100_115_801_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000115", + "recording_id": null, + "episode_id": 115, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/task_00100_115_570_camera_top.jpg", + "images/task_00100_115_701_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000115", + "recording_id": null, + "episode_id": 115, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_00100_115_682_camera_top.jpg", + "images/task_00100_115_164_camera_top.jpg", + "images/task_00100_115_103_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000115", + "recording_id": null, + "episode_id": 115, + "camera": "camera_top", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00100_115_236_camera_top_t-6.jpg", + "images/task_00100_115_239_camera_top_t-3.jpg", + "images/task_00100_115_242_camera_top_t0.jpg", + "images/task_00100_115_245_camera_top_t+3.jpg", + "images/task_00100_115_248_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000115", + "recording_id": null, + "episode_id": 115, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nA. toward the left side of the scene\nB. toward the bottom of the scene\nC. toward the top of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. toward the left side of the scene" + } + ], + "images": [ + "images/task_00100_118_110_camera_top.jpg", + "images/task_00100_118_115_camera_top.jpg", + "images/task_00100_118_120_camera_top.jpg", + "images/task_00100_118_125_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000118", + "recording_id": null, + "episode_id": 118, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00100_118_382_camera_top_t-6.jpg", + "images/task_00100_118_385_camera_top_t-3.jpg", + "images/task_00100_118_388_camera_top_t0.jpg", + "images/task_00100_118_391_camera_top_t+3.jpg", + "images/task_00100_118_394_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000118", + "recording_id": null, + "episode_id": 118, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_00100_118_604_camera_top.jpg", + "images/task_00100_118_678_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000118", + "recording_id": null, + "episode_id": 118, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_00100_118_518_camera_top.jpg", + "images/task_00100_118_114_camera_top.jpg", + "images/task_00100_118_789_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000118", + "recording_id": null, + "episode_id": 118, + "camera": "camera_top", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_00100_118_456_camera_top.jpg", + "images/task_00100_118_499_camera_top.jpg", + "images/task_00100_118_369_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000118", + "recording_id": null, + "episode_id": 118, + "camera": "camera_top", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00100_118_142_camera_top.jpg", + "images/task_00100_118_145_camera_top.jpg", + "images/task_00100_118_148_camera_top.jpg", + "images/task_00100_118_151_camera_top.jpg", + "images/task_00100_118_154_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000118", + "recording_id": null, + "episode_id": 118, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the bottom of the scene\nB. toward the left side of the scene\nC. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. toward the right side of the scene" + } + ], + "images": [ + "images/task_00100_118_606_camera_top.jpg", + "images/task_00100_118_611_camera_top.jpg", + "images/task_00100_118_616_camera_top.jpg", + "images/task_00100_118_621_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000118", + "recording_id": null, + "episode_id": 118, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — no contact" + } + ], + "images": [ + "images/task_00100_123_400_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000123", + "recording_id": null, + "episode_id": 123, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00100_123_276_camera_top.jpg", + "images/task_00100_123_279_camera_top.jpg", + "images/task_00100_123_282_camera_top.jpg", + "images/task_00100_123_285_camera_top.jpg", + "images/task_00100_123_288_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000123", + "recording_id": null, + "episode_id": 123, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the bottom of the scene\nB. toward the left side of the scene\nC. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00100_123_631_camera_top.jpg", + "images/task_00100_123_636_camera_top.jpg", + "images/task_00100_123_641_camera_top.jpg", + "images/task_00100_123_646_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000123", + "recording_id": null, + "episode_id": 123, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_00100_123_416_camera_top.jpg", + "images/task_00100_123_106_camera_top.jpg", + "images/task_00100_123_150_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000123", + "recording_id": null, + "episode_id": 123, + "camera": "camera_top", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. contact\nC. hold and carry\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. contact" + } + ], + "images": [ + "images/task_00100_123_416_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000123", + "recording_id": null, + "episode_id": 123, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. pre-approach\nC. 
hold and carry\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. pre-approach" + } + ], + "images": [ + "images/task_00100_123_43_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000123", + "recording_id": null, + "episode_id": 123, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00100_123_149_camera_top.jpg", + "images/task_00100_123_152_camera_top.jpg", + "images/task_00100_123_155_camera_top.jpg", + "images/task_00100_123_158_camera_top.jpg", + "images/task_00100_123_161_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000123", + "recording_id": null, + "episode_id": 123, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. release\nC. hold and carry\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
contact" + } + ], + "images": [ + "images/task_00100_123_120_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000123", + "recording_id": null, + "episode_id": 123, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_00100_123_353_camera_top.jpg", + "images/task_00100_123_239_camera_top.jpg", + "images/task_00100_123_450_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000123", + "recording_id": null, + "episode_id": 123, + "camera": "camera_top", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/task_00100_123_117_camera_top.jpg", + "images/task_00100_123_256_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000123", + "recording_id": null, + "episode_id": 123, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00100_123_804_camera_top_t-6.jpg", + "images/task_00100_123_807_camera_top_t-3.jpg", + "images/task_00100_123_810_camera_top_t0.jpg", + "images/task_00100_123_813_camera_top_t+3.jpg", + "images/task_00100_123_816_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000123", + "recording_id": null, + "episode_id": 123, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. contact\nC. transfer\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
release" + } + ], + "images": [ + "images/task_00100_123_930_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000123", + "recording_id": null, + "episode_id": 123, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00100_123_220_camera_top.jpg", + "images/task_00100_123_223_camera_top.jpg", + "images/task_00100_123_226_camera_top.jpg", + "images/task_00100_123_229_camera_top.jpg", + "images/task_00100_123_232_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000123", + "recording_id": null, + "episode_id": 123, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. transfer\nC. approach\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
approach" + } + ], + "images": [ + "images/task_00100_123_382_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000123", + "recording_id": null, + "episode_id": 123, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the right side of the scene\nA. toward the bottom of the scene\nC. toward the top of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. toward the right side of the scene" + } + ], + "images": [ + "images/task_00100_123_638_camera_top.jpg", + "images/task_00100_123_643_camera_top.jpg", + "images/task_00100_123_648_camera_top.jpg", + "images/task_00100_123_653_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000123", + "recording_id": null, + "episode_id": 123, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the left side of the scene\nA. toward the top of the scene\nC. toward the bottom of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
toward the left side of the scene" + } + ], + "images": [ + "images/task_00100_123_505_camera_top.jpg", + "images/task_00100_123_510_camera_top.jpg", + "images/task_00100_123_515_camera_top.jpg", + "images/task_00100_123_520_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000123", + "recording_id": null, + "episode_id": 123, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nA. toward the right side of the scene\nB. toward the top of the scene\nC. toward the bottom of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. toward the right side of the scene" + } + ], + "images": [ + "images/task_00100_123_601_camera_top.jpg", + "images/task_00100_123_606_camera_top.jpg", + "images/task_00100_123_611_camera_top.jpg", + "images/task_00100_123_616_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000123", + "recording_id": null, + "episode_id": 123, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nA. toward the left side of the scene\nB. toward the top of the scene\nC. toward the right side of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
toward the left side of the scene" + } + ], + "images": [ + "images/task_00100_126_559_camera_top.jpg", + "images/task_00100_126_564_camera_top.jpg", + "images/task_00100_126_569_camera_top.jpg", + "images/task_00100_126_574_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000126", + "recording_id": null, + "episode_id": 126, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00100_126_313_camera_top.jpg", + "images/task_00100_126_316_camera_top.jpg", + "images/task_00100_126_319_camera_top.jpg", + "images/task_00100_126_322_camera_top.jpg", + "images/task_00100_126_325_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000126", + "recording_id": null, + "episode_id": 126, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_00100_126_411_camera_top.jpg", + "images/task_00100_126_636_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000126", + "recording_id": null, + "episode_id": 126, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. approach\nC. pre-approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. transfer" + } + ], + "images": [ + "images/task_00100_126_353_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000126", + "recording_id": null, + "episode_id": 126, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the right side of the scene\nA. toward the bottom of the scene\nC. toward the left side of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00100_126_730_camera_top.jpg", + "images/task_00100_126_735_camera_top.jpg", + "images/task_00100_126_740_camera_top.jpg", + "images/task_00100_126_745_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000126", + "recording_id": null, + "episode_id": 126, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00100_126_814_camera_top_t-6.jpg", + "images/task_00100_126_817_camera_top_t-3.jpg", + "images/task_00100_126_820_camera_top_t0.jpg", + "images/task_00100_126_823_camera_top_t+3.jpg", + "images/task_00100_126_826_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000126", + "recording_id": null, + "episode_id": 126, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_00100_126_571_camera_top.jpg", + "images/task_00100_126_136_camera_top.jpg", + "images/task_00100_126_642_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000126", + "recording_id": null, + "episode_id": 126, + "camera": "camera_top", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_00100_126_132_camera_top.jpg", + "images/task_00100_126_274_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000126", + "recording_id": null, + "episode_id": 126, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/task_00100_126_385_camera_top.jpg", + "images/task_00100_126_306_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00100", + "split": "eval", + "split_group_id": "task_00100__episode_000126", + "recording_id": null, + "episode_id": 126, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the top of the scene\nB. toward the bottom of the scene\nC. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. toward the right side of the scene" + } + ], + "images": [ + "images/task_00101_18_574_camera_top.jpg", + "images/task_00101_18_579_camera_top.jpg", + "images/task_00101_18_584_camera_top.jpg", + "images/task_00101_18_589_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00101", + "split": "eval", + "split_group_id": "task_00101__episode_000018", + "recording_id": null, + "episode_id": 18, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nC. toward the right side of the scene\nA. toward the left side of the scene\nB. toward the bottom of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00101_84_336_camera_top.jpg", + "images/task_00101_84_341_camera_top.jpg", + "images/task_00101_84_346_camera_top.jpg", + "images/task_00101_84_351_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00101", + "split": "eval", + "split_group_id": "task_00101__episode_000084", + "recording_id": null, + "episode_id": 84, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. pre-approach\nC. transfer\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. release" + } + ], + "images": [ + "images/task_00101_131_672_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00101", + "split": "eval", + "split_group_id": "task_00101__episode_000131", + "recording_id": null, + "episode_id": 131, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. approach\nC. release\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. release" + } + ], + "images": [ + "images/task_00101_131_645_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00101", + "split": "eval", + "split_group_id": "task_00101__episode_000131", + "recording_id": null, + "episode_id": 131, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the left side of the scene\nA. 
toward the bottom of the scene\nC. toward the right side of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. toward the left side of the scene" + } + ], + "images": [ + "images/task_00101_197_354_camera_top.jpg", + "images/task_00101_197_359_camera_top.jpg", + "images/task_00101_197_364_camera_top.jpg", + "images/task_00101_197_369_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00101", + "split": "eval", + "split_group_id": "task_00101__episode_000197", + "recording_id": null, + "episode_id": 197, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. release\nC. transfer\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. approach" + } + ], + "images": [ + "images/task_00101_206_50_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00101", + "split": "eval", + "split_group_id": "task_00101__episode_000206", + "recording_id": null, + "episode_id": 206, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. release\nC. approach\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
release" + } + ], + "images": [ + "images/task_00101_244_803_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00101", + "split": "eval", + "split_group_id": "task_00101__episode_000244", + "recording_id": null, + "episode_id": 244, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. transfer\nC. release\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. approach" + } + ], + "images": [ + "images/task_00101_272_39_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00101", + "split": "eval", + "split_group_id": "task_00101__episode_000272", + "recording_id": null, + "episode_id": 272, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nA. toward the right side of the scene\nB. toward the top of the scene\nC. toward the left side of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00101_309_504_camera_top.jpg", + "images/task_00101_309_509_camera_top.jpg", + "images/task_00101_309_514_camera_top.jpg", + "images/task_00101_309_519_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00101", + "split": "eval", + "split_group_id": "task_00101__episode_000309", + "recording_id": null, + "episode_id": 309, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. approach\nC. pre-approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. approach" + } + ], + "images": [ + "images/task_00101_347_43_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00101", + "split": "eval", + "split_group_id": "task_00101__episode_000347", + "recording_id": null, + "episode_id": 347, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nA. toward the left side of the scene\nB. toward the top of the scene\nC. toward the right side of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
toward the left side of the scene" + } + ], + "images": [ + "images/task_00101_366_326_camera_top.jpg", + "images/task_00101_366_331_camera_top.jpg", + "images/task_00101_366_336_camera_top.jpg", + "images/task_00101_366_341_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00101", + "split": "eval", + "split_group_id": "task_00101__episode_000366", + "recording_id": null, + "episode_id": 366, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the right side of the scene\nA. toward the top of the scene\nC. toward the bottom of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. toward the right side of the scene" + } + ], + "images": [ + "images/task_00101_394_533_camera_top.jpg", + "images/task_00101_394_538_camera_top.jpg", + "images/task_00101_394_543_camera_top.jpg", + "images/task_00101_394_548_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00101", + "split": "eval", + "split_group_id": "task_00101__episode_000394", + "recording_id": null, + "episode_id": 394, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nC. toward the left side of the scene\nA. toward the top of the scene\nB. toward the right side of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
toward the left side of the scene" + } + ], + "images": [ + "images/task_00101_403_100_camera_top.jpg", + "images/task_00101_403_105_camera_top.jpg", + "images/task_00101_403_110_camera_top.jpg", + "images/task_00101_403_115_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00101", + "split": "eval", + "split_group_id": "task_00101__episode_000403", + "recording_id": null, + "episode_id": 403, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. approach\nC. pre-approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. release" + } + ], + "images": [ + "images/task_00101_441_620_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00101", + "split": "eval", + "split_group_id": "task_00101__episode_000441", + "recording_id": null, + "episode_id": 441, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. approach\nC. transfer\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. hold and carry" + } + ], + "images": [ + "images/task_00102_8_186_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00102", + "split": "eval", + "split_group_id": "task_00102__episode_000008", + "recording_id": null, + "episode_id": 8, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00102_12_508_camera_top.jpg", + "images/task_00102_12_511_camera_top.jpg", + "images/task_00102_12_514_camera_top.jpg", + "images/task_00102_12_517_camera_top.jpg", + "images/task_00102_12_520_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00102", + "split": "eval", + "split_group_id": "task_00102__episode_000012", + "recording_id": null, + "episode_id": 12, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_00102_16_232_camera_top.jpg", + "images/task_00102_16_325_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00102", + "split": "eval", + "split_group_id": "task_00102__episode_000016", + "recording_id": null, + "episode_id": 16, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. 
Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00102_16_237_camera_top_t-6.jpg", + "images/task_00102_16_240_camera_top_t-3.jpg", + "images/task_00102_16_243_camera_top_t0.jpg", + "images/task_00102_16_246_camera_top_t+3.jpg", + "images/task_00102_16_249_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00102", + "split": "eval", + "split_group_id": "task_00102__episode_000016", + "recording_id": null, + "episode_id": 16, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00102_77_152_camera_top.jpg", + "images/task_00102_77_155_camera_top.jpg", + "images/task_00102_77_158_camera_top.jpg", + "images/task_00102_77_161_camera_top.jpg", + "images/task_00102_77_164_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00102", + "split": "eval", + "split_group_id": "task_00102__episode_000077", + "recording_id": null, + "episode_id": 77, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. transfer\nC. hold and carry\nD. release\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "C. hold and carry" + } + ], + "images": [ + "images/task_00102_85_501_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00102", + "split": "eval", + "split_group_id": "task_00102__episode_000085", + "recording_id": null, + "episode_id": 85, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00102_93_168_camera_top.jpg", + "images/task_00102_93_171_camera_top.jpg", + "images/task_00102_93_174_camera_top.jpg", + "images/task_00102_93_177_camera_top.jpg", + "images/task_00102_93_180_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00102", + "split": "eval", + "split_group_id": "task_00102__episode_000093", + "recording_id": null, + "episode_id": 93, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the bottom of the scene\nB. toward the top of the scene\nC. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00102_162_382_camera_top.jpg", + "images/task_00102_162_387_camera_top.jpg", + "images/task_00102_162_392_camera_top.jpg", + "images/task_00102_162_397_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00102", + "split": "eval", + "split_group_id": "task_00102__episode_000162", + "recording_id": null, + "episode_id": 162, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. transfer\nC. release\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. contact" + } + ], + "images": [ + "images/task_00102_186_206_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00102", + "split": "eval", + "split_group_id": "task_00102__episode_000186", + "recording_id": null, + "episode_id": 186, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. transfer\nC. hold and carry\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. pre-approach" + } + ], + "images": [ + "images/task_00103_2_96_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00103", + "split": "eval", + "split_group_id": "task_00103__episode_000002", + "recording_id": null, + "episode_id": 2, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00103_81_150_camera_top.jpg", + "images/task_00103_81_153_camera_top.jpg", + "images/task_00103_81_156_camera_top.jpg", + "images/task_00103_81_159_camera_top.jpg", + "images/task_00103_81_162_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00103", + "split": "eval", + "split_group_id": "task_00103__episode_000081", + "recording_id": null, + "episode_id": 81, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. transfer\nC. pre-approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. pre-approach" + } + ], + "images": [ + "images/task_00103_86_38_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00103", + "split": "eval", + "split_group_id": "task_00103__episode_000086", + "recording_id": null, + "episode_id": 86, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. approach\nC. release\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
release" + } + ], + "images": [ + "images/task_00103_89_216_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00103", + "split": "eval", + "split_group_id": "task_00103__episode_000089", + "recording_id": null, + "episode_id": 89, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_00104_65_230_camera_top.jpg", + "images/task_00104_65_129_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00104_65_69_camera_top.jpg", + "images/task_00104_65_72_camera_top.jpg", + "images/task_00104_65_75_camera_top.jpg", + "images/task_00104_65_78_camera_top.jpg", + "images/task_00104_65_81_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_00104_65_232_camera_top.jpg", + "images/task_00104_65_99_camera_top.jpg", + "images/task_00104_65_432_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00104_65_189_camera_top_t-6.jpg", + "images/task_00104_65_192_camera_top_t-3.jpg", + "images/task_00104_65_195_camera_top_t0.jpg", + "images/task_00104_65_198_camera_top_t+3.jpg", + "images/task_00104_65_201_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00104_65_371_camera_top.jpg", + "images/task_00104_65_374_camera_top.jpg", + "images/task_00104_65_377_camera_top.jpg", + "images/task_00104_65_380_camera_top.jpg", + "images/task_00104_65_383_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. transfer\nC. contact\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
hold and carry" + } + ], + "images": [ + "images/task_00104_65_174_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nC. toward the right side of the scene\nA. toward the left side of the scene\nB. toward the top of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. toward the right side of the scene" + } + ], + "images": [ + "images/task_00104_65_377_camera_top.jpg", + "images/task_00104_65_382_camera_top.jpg", + "images/task_00104_65_387_camera_top.jpg", + "images/task_00104_65_392_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. contact\nC. release\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
release" + } + ], + "images": [ + "images/task_00104_65_546_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00104_65_135_camera_top_t-6.jpg", + "images/task_00104_65_138_camera_top_t-3.jpg", + "images/task_00104_65_141_camera_top_t0.jpg", + "images/task_00104_65_144_camera_top_t+3.jpg", + "images/task_00104_65_147_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — contact established" + } + ], + "images": [ + "images/task_00104_65_41_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nA. toward the right side of the scene\nB. toward the left side of the scene\nC. toward the top of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. toward the right side of the scene" + } + ], + "images": [ + "images/task_00104_65_463_camera_top.jpg", + "images/task_00104_65_468_camera_top.jpg", + "images/task_00104_65_473_camera_top.jpg", + "images/task_00104_65_478_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_00104_65_229_camera_top.jpg", + "images/task_00104_65_421_camera_top.jpg", + "images/task_00104_65_165_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the bottom of the scene\nB. toward the left side of the scene\nC. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. toward the right side of the scene" + } + ], + "images": [ + "images/task_00104_65_465_camera_top.jpg", + "images/task_00104_65_470_camera_top.jpg", + "images/task_00104_65_475_camera_top.jpg", + "images/task_00104_65_480_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the top of the scene\nB. toward the bottom of the scene\nC. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00104_65_464_camera_top.jpg", + "images/task_00104_65_469_camera_top.jpg", + "images/task_00104_65_474_camera_top.jpg", + "images/task_00104_65_479_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00104_65_446_camera_top.jpg", + "images/task_00104_65_449_camera_top.jpg", + "images/task_00104_65_452_camera_top.jpg", + "images/task_00104_65_455_camera_top.jpg", + "images/task_00104_65_458_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — no contact" + } + ], + "images": [ + "images/task_00104_65_482_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00104_65_96_camera_top.jpg", + "images/task_00104_65_99_camera_top.jpg", + "images/task_00104_65_102_camera_top.jpg", + "images/task_00104_65_105_camera_top.jpg", + "images/task_00104_65_108_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. transfer\nC. contact\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
contact" + } + ], + "images": [ + "images/task_00104_65_111_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/task_00104_65_174_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the right side of the scene\nA. toward the top of the scene\nC. toward the left side of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00104_65_174_camera_top.jpg", + "images/task_00104_65_179_camera_top.jpg", + "images/task_00104_65_184_camera_top.jpg", + "images/task_00104_65_189_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. contact\nC. transfer\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. transfer" + } + ], + "images": [ + "images/task_00104_65_419_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00104_65_314_camera_top.jpg", + "images/task_00104_65_317_camera_top.jpg", + "images/task_00104_65_320_camera_top.jpg", + "images/task_00104_65_323_camera_top.jpg", + "images/task_00104_65_326_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00104_65_258_camera_top.jpg", + "images/task_00104_65_261_camera_top.jpg", + "images/task_00104_65_264_camera_top.jpg", + "images/task_00104_65_267_camera_top.jpg", + "images/task_00104_65_270_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. transfer\nC. release\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
contact" + } + ], + "images": [ + "images/task_00104_65_446_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. transfer\nC. contact\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. transfer" + } + ], + "images": [ + "images/task_00104_65_102_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/task_00104_65_203_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. contact\nC. transfer\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
hold and carry" + } + ], + "images": [ + "images/task_00104_65_278_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_00104_65_382_camera_top.jpg", + "images/task_00104_65_505_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00104_65_452_camera_top.jpg", + "images/task_00104_65_455_camera_top.jpg", + "images/task_00104_65_458_camera_top.jpg", + "images/task_00104_65_461_camera_top.jpg", + "images/task_00104_65_464_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00104_65_170_camera_top.jpg", + "images/task_00104_65_173_camera_top.jpg", + "images/task_00104_65_176_camera_top.jpg", + "images/task_00104_65_179_camera_top.jpg", + "images/task_00104_65_182_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_00104_65_262_camera_top.jpg", + "images/task_00104_65_435_camera_top.jpg", + "images/task_00104_65_225_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. approach\nC. release\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. approach" + } + ], + "images": [ + "images/task_00104_65_437_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00104_65_248_camera_top_t-6.jpg", + "images/task_00104_65_251_camera_top_t-3.jpg", + "images/task_00104_65_254_camera_top_t0.jpg", + "images/task_00104_65_257_camera_top_t+3.jpg", + "images/task_00104_65_260_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_00104_65_591_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_00104_65_232_camera_top.jpg", + "images/task_00104_65_418_camera_top.jpg", + "images/task_00104_65_351_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00104_65_458_camera_top.jpg", + "images/task_00104_65_461_camera_top.jpg", + "images/task_00104_65_464_camera_top.jpg", + "images/task_00104_65_467_camera_top.jpg", + "images/task_00104_65_470_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. transfer\nC. contact\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
hold and carry" + } + ], + "images": [ + "images/task_00104_65_208_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. release\nC. transfer\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. approach" + } + ], + "images": [ + "images/task_00104_65_441_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nA. toward the right side of the scene\nB. toward the top of the scene\nC. toward the bottom of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00104_129_508_camera_top.jpg", + "images/task_00104_129_513_camera_top.jpg", + "images/task_00104_129_518_camera_top.jpg", + "images/task_00104_129_523_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_00104_129_395_camera_top.jpg", + "images/task_00104_129_492_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. release\nC. transfer\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
approach" + } + ], + "images": [ + "images/task_00104_129_167_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. pre-approach\nC. transfer\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. transfer" + } + ], + "images": [ + "images/task_00104_129_153_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/task_00104_129_250_camera_top.jpg", + "images/task_00104_129_148_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. contact\nC. pre-approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. release" + } + ], + "images": [ + "images/task_00104_129_485_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00104_129_344_camera_top.jpg", + "images/task_00104_129_347_camera_top.jpg", + "images/task_00104_129_350_camera_top.jpg", + "images/task_00104_129_353_camera_top.jpg", + "images/task_00104_129_356_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00104_129_102_camera_top.jpg", + "images/task_00104_129_105_camera_top.jpg", + "images/task_00104_129_108_camera_top.jpg", + "images/task_00104_129_111_camera_top.jpg", + "images/task_00104_129_114_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. hold and carry\nC. contact\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
transfer" + } + ], + "images": [ + "images/task_00104_129_137_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00104_129_287_camera_top_t-6.jpg", + "images/task_00104_129_290_camera_top_t-3.jpg", + "images/task_00104_129_293_camera_top_t0.jpg", + "images/task_00104_129_296_camera_top_t+3.jpg", + "images/task_00104_129_299_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00104_129_309_camera_top_t-6.jpg", + "images/task_00104_129_312_camera_top_t-3.jpg", + "images/task_00104_129_315_camera_top_t0.jpg", + "images/task_00104_129_318_camera_top_t+3.jpg", + "images/task_00104_129_321_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00104_129_391_camera_top.jpg", + "images/task_00104_129_394_camera_top.jpg", + "images/task_00104_129_397_camera_top.jpg", + "images/task_00104_129_400_camera_top.jpg", + "images/task_00104_129_403_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00104_129_45_camera_top.jpg", + "images/task_00104_129_48_camera_top.jpg", + "images/task_00104_129_51_camera_top.jpg", + "images/task_00104_129_54_camera_top.jpg", + "images/task_00104_129_57_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_00104_129_100_camera_top.jpg", + "images/task_00104_129_194_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. pre-approach\nC. release\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
release" + } + ], + "images": [ + "images/task_00104_129_579_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nC. toward the right side of the scene\nA. toward the bottom of the scene\nB. toward the top of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. toward the right side of the scene" + } + ], + "images": [ + "images/task_00104_129_502_camera_top.jpg", + "images/task_00104_129_507_camera_top.jpg", + "images/task_00104_129_512_camera_top.jpg", + "images/task_00104_129_517_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — contact established" + } + ], + "images": [ + "images/task_00104_129_422_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/task_00104_129_508_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00104_129_296_camera_top.jpg", + "images/task_00104_129_299_camera_top.jpg", + "images/task_00104_129_302_camera_top.jpg", + "images/task_00104_129_305_camera_top.jpg", + "images/task_00104_129_308_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the top of the scene\nB. toward the bottom of the scene\nC. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. toward the right side of the scene" + } + ], + "images": [ + "images/task_00104_129_500_camera_top.jpg", + "images/task_00104_129_505_camera_top.jpg", + "images/task_00104_129_510_camera_top.jpg", + "images/task_00104_129_515_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00104_129_117_camera_top.jpg", + "images/task_00104_129_120_camera_top.jpg", + "images/task_00104_129_123_camera_top.jpg", + "images/task_00104_129_126_camera_top.jpg", + "images/task_00104_129_129_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. approach\nC. transfer\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. approach" + } + ], + "images": [ + "images/task_00104_129_4_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00104_129_41_camera_top.jpg", + "images/task_00104_129_44_camera_top.jpg", + "images/task_00104_129_47_camera_top.jpg", + "images/task_00104_129_50_camera_top.jpg", + "images/task_00104_129_53_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_00104_129_569_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. approach\nC. transfer\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
hold and carry" + } + ], + "images": [ + "images/task_00104_129_355_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nA. toward the right side of the scene\nB. toward the bottom of the scene\nC. toward the left side of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. toward the right side of the scene" + } + ], + "images": [ + "images/task_00104_129_509_camera_top.jpg", + "images/task_00104_129_514_camera_top.jpg", + "images/task_00104_129_519_camera_top.jpg", + "images/task_00104_129_524_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. contact\nC. transfer\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
contact" + } + ], + "images": [ + "images/task_00104_129_30_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. hold and carry\nC. contact\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. release" + } + ], + "images": [ + "images/task_00104_129_514_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00104_129_130_camera_top.jpg", + "images/task_00104_129_133_camera_top.jpg", + "images/task_00104_129_136_camera_top.jpg", + "images/task_00104_129_139_camera_top.jpg", + "images/task_00104_129_142_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00104_129_31_camera_top.jpg", + "images/task_00104_129_34_camera_top.jpg", + "images/task_00104_129_37_camera_top.jpg", + "images/task_00104_129_40_camera_top.jpg", + "images/task_00104_129_43_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — no contact" + } + ], + "images": [ + "images/task_00104_129_577_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the right side of the scene\nA. toward the bottom of the scene\nC. toward the left side of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. toward the right side of the scene" + } + ], + "images": [ + "images/task_00104_129_505_camera_top.jpg", + "images/task_00104_129_510_camera_top.jpg", + "images/task_00104_129_515_camera_top.jpg", + "images/task_00104_129_520_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00104", + "split": "eval", + "split_group_id": "task_00104__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
both arms are active" + } + ], + "images": [ + "images/task_00105_0_755_camera_top.jpg", + "images/task_00105_0_758_camera_top.jpg", + "images/task_00105_0_761_camera_top.jpg", + "images/task_00105_0_764_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. only the right arm is active" + } + ], + "images": [ + "images/task_00105_0_584_camera_top.jpg", + "images/task_00105_0_587_camera_top.jpg", + "images/task_00105_0_590_camera_top.jpg", + "images/task_00105_0_593_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
both arms are idle" + } + ], + "images": [ + "images/task_00105_0_1308_camera_top.jpg", + "images/task_00105_0_1311_camera_top.jpg", + "images/task_00105_0_1314_camera_top.jpg", + "images/task_00105_0_1317_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nB. toward the right side of the scene\nA. toward the left side of the scene\nC. toward the top of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. toward the right side of the scene" + } + ], + "images": [ + "images/task_00105_0_969_camera_top.jpg", + "images/task_00105_0_974_camera_top.jpg", + "images/task_00105_0_979_camera_top.jpg", + "images/task_00105_0_984_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. transfer\nC. approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
transfer" + } + ], + "images": [ + "images/task_00105_0_1629_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_00105_0_1640_camera_top.jpg", + "images/task_00105_0_1128_camera_top.jpg", + "images/task_00105_0_1803_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00105_0_785_camera_top_t-6.jpg", + "images/task_00105_0_788_camera_top_t-3.jpg", + "images/task_00105_0_791_camera_top_t0.jpg", + "images/task_00105_0_794_camera_top_t+3.jpg", + "images/task_00105_0_797_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_00105_0_525_camera_top.jpg", + "images/task_00105_0_1820_camera_top.jpg", + "images/task_00105_0_1076_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00105_0_1217_camera_top_t-6.jpg", + "images/task_00105_0_1220_camera_top_t-3.jpg", + "images/task_00105_0_1223_camera_top_t0.jpg", + "images/task_00105_0_1226_camera_top_t+3.jpg", + "images/task_00105_0_1229_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. both arms are active" + } + ], + "images": [ + "images/task_00105_0_757_camera_top.jpg", + "images/task_00105_0_760_camera_top.jpg", + "images/task_00105_0_763_camera_top.jpg", + "images/task_00105_0_766_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00105_0_1193_camera_top_t-6.jpg", + "images/task_00105_0_1196_camera_top_t-3.jpg", + "images/task_00105_0_1199_camera_top_t0.jpg", + "images/task_00105_0_1202_camera_top_t+3.jpg", + "images/task_00105_0_1205_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. only the right arm is active" + } + ], + "images": [ + "images/task_00105_0_1694_camera_top.jpg", + "images/task_00105_0_1697_camera_top.jpg", + "images/task_00105_0_1700_camera_top.jpg", + "images/task_00105_0_1703_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_00105_0_1764_camera_top.jpg", + "images/task_00105_0_533_camera_top.jpg", + "images/task_00105_0_1349_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. only the right arm is active" + } + ], + "images": [ + "images/task_00105_0_1263_camera_top.jpg", + "images/task_00105_0_1266_camera_top.jpg", + "images/task_00105_0_1269_camera_top.jpg", + "images/task_00105_0_1272_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00105_0_1594_camera_top.jpg", + "images/task_00105_0_1597_camera_top.jpg", + "images/task_00105_0_1600_camera_top.jpg", + "images/task_00105_0_1603_camera_top.jpg", + "images/task_00105_0_1606_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the left side of the scene\nA. toward the top of the scene\nB. toward the bottom of the scene\nC. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. toward the left side of the scene" + } + ], + "images": [ + "images/task_00105_0_1253_camera_top.jpg", + "images/task_00105_0_1258_camera_top.jpg", + "images/task_00105_0_1263_camera_top.jpg", + "images/task_00105_0_1268_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
both arms are active" + } + ], + "images": [ + "images/task_00105_0_1201_camera_top.jpg", + "images/task_00105_0_1204_camera_top.jpg", + "images/task_00105_0_1207_camera_top.jpg", + "images/task_00105_0_1210_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. approach\nC. pre-approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. approach" + } + ], + "images": [ + "images/task_00105_0_1239_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. hold and carry\nC. pre-approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. approach" + } + ], + "images": [ + "images/task_00105_0_1064_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. 
only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. both arms are active" + } + ], + "images": [ + "images/task_00105_0_365_camera_top.jpg", + "images/task_00105_0_368_camera_top.jpg", + "images/task_00105_0_371_camera_top.jpg", + "images/task_00105_0_374_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00105_0_612_camera_top.jpg", + "images/task_00105_0_615_camera_top.jpg", + "images/task_00105_0_618_camera_top.jpg", + "images/task_00105_0_621_camera_top.jpg", + "images/task_00105_0_624_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00105_0_1507_camera_top_t-6.jpg", + "images/task_00105_0_1510_camera_top_t-3.jpg", + "images/task_00105_0_1513_camera_top_t0.jpg", + "images/task_00105_0_1516_camera_top_t+3.jpg", + "images/task_00105_0_1519_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00105_0_1400_camera_top.jpg", + "images/task_00105_0_1403_camera_top.jpg", + "images/task_00105_0_1406_camera_top.jpg", + "images/task_00105_0_1409_camera_top.jpg", + "images/task_00105_0_1412_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. 
Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_00105_0_736_camera_top.jpg", + "images/task_00105_0_538_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. only the right arm is active" + } + ], + "images": [ + "images/task_00105_0_1437_camera_top.jpg", + "images/task_00105_0_1440_camera_top.jpg", + "images/task_00105_0_1443_camera_top.jpg", + "images/task_00105_0_1446_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. hold and carry\nC. contact\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
release" + } + ], + "images": [ + "images/task_00105_0_1909_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. only the right arm is active" + } + ], + "images": [ + "images/task_00105_0_1691_camera_top.jpg", + "images/task_00105_0_1694_camera_top.jpg", + "images/task_00105_0_1697_camera_top.jpg", + "images/task_00105_0_1700_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
only the right arm is active" + } + ], + "images": [ + "images/task_00105_0_449_camera_top.jpg", + "images/task_00105_0_452_camera_top.jpg", + "images/task_00105_0_455_camera_top.jpg", + "images/task_00105_0_458_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. both arms are idle" + } + ], + "images": [ + "images/task_00105_0_1746_camera_top.jpg", + "images/task_00105_0_1749_camera_top.jpg", + "images/task_00105_0_1752_camera_top.jpg", + "images/task_00105_0_1755_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/task_00105_0_1325_camera_top.jpg", + "images/task_00105_0_1169_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nC. toward the left side of the scene\nA. toward the right side of the scene\nB. toward the top of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. toward the left side of the scene" + } + ], + "images": [ + "images/task_00105_0_1441_camera_top.jpg", + "images/task_00105_0_1446_camera_top.jpg", + "images/task_00105_0_1451_camera_top.jpg", + "images/task_00105_0_1456_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00105_0_1502_camera_top.jpg", + "images/task_00105_0_1505_camera_top.jpg", + "images/task_00105_0_1508_camera_top.jpg", + "images/task_00105_0_1511_camera_top.jpg", + "images/task_00105_0_1514_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_00105_0_1133_camera_top.jpg", + "images/task_00105_0_1327_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_00105_0_566_camera_top.jpg", + "images/task_00105_0_1808_camera_top.jpg", + "images/task_00105_0_1654_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. both arms are active" + } + ], + "images": [ + "images/task_00105_0_756_camera_top.jpg", + "images/task_00105_0_759_camera_top.jpg", + "images/task_00105_0_762_camera_top.jpg", + "images/task_00105_0_765_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the right side of the scene\nA. toward the left side of the scene\nC. toward the bottom of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00105_0_1343_camera_top.jpg", + "images/task_00105_0_1348_camera_top.jpg", + "images/task_00105_0_1353_camera_top.jpg", + "images/task_00105_0_1358_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the left side of the scene\nA. toward the bottom of the scene\nB. toward the right side of the scene\nC. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. toward the left side of the scene" + } + ], + "images": [ + "images/task_00105_0_1444_camera_top.jpg", + "images/task_00105_0_1449_camera_top.jpg", + "images/task_00105_0_1454_camera_top.jpg", + "images/task_00105_0_1459_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
only the right arm is active" + } + ], + "images": [ + "images/task_00105_0_1692_camera_top.jpg", + "images/task_00105_0_1695_camera_top.jpg", + "images/task_00105_0_1698_camera_top.jpg", + "images/task_00105_0_1701_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. contact\nC. release\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. transfer" + } + ], + "images": [ + "images/task_00105_0_695_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
both arms are idle" + } + ], + "images": [ + "images/task_00105_0_1835_camera_top.jpg", + "images/task_00105_0_1838_camera_top.jpg", + "images/task_00105_0_1841_camera_top.jpg", + "images/task_00105_0_1844_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. both arms are idle" + } + ], + "images": [ + "images/task_00105_0_515_camera_top.jpg", + "images/task_00105_0_518_camera_top.jpg", + "images/task_00105_0_521_camera_top.jpg", + "images/task_00105_0_524_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the right side of the scene\nA. toward the top of the scene\nC. toward the bottom of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00105_0_1341_camera_top.jpg", + "images/task_00105_0_1346_camera_top.jpg", + "images/task_00105_0_1351_camera_top.jpg", + "images/task_00105_0_1356_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. both arms are idle" + } + ], + "images": [ + "images/task_00105_0_218_camera_top.jpg", + "images/task_00105_0_221_camera_top.jpg", + "images/task_00105_0_224_camera_top.jpg", + "images/task_00105_0_227_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nB. toward the left side of the scene\nA. toward the bottom of the scene\nC. toward the top of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
toward the left side of the scene" + } + ], + "images": [ + "images/task_00105_0_1759_camera_top.jpg", + "images/task_00105_0_1764_camera_top.jpg", + "images/task_00105_0_1769_camera_top.jpg", + "images/task_00105_0_1774_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. both arms are active" + } + ], + "images": [ + "images/task_00105_0_754_camera_top.jpg", + "images/task_00105_0_757_camera_top.jpg", + "images/task_00105_0_760_camera_top.jpg", + "images/task_00105_0_763_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nA. toward the left side of the scene\nB. toward the right side of the scene\nC. toward the top of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
toward the left side of the scene" + } + ], + "images": [ + "images/task_00105_0_1243_camera_top.jpg", + "images/task_00105_0_1248_camera_top.jpg", + "images/task_00105_0_1253_camera_top.jpg", + "images/task_00105_0_1258_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. only the right arm is active" + } + ], + "images": [ + "images/task_00105_0_1693_camera_top.jpg", + "images/task_00105_0_1696_camera_top.jpg", + "images/task_00105_0_1699_camera_top.jpg", + "images/task_00105_0_1702_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
only the left arm is active" + } + ], + "images": [ + "images/task_00105_0_981_camera_top.jpg", + "images/task_00105_0_984_camera_top.jpg", + "images/task_00105_0_987_camera_top.jpg", + "images/task_00105_0_990_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_00105_0_1706_camera_top.jpg", + "images/task_00105_0_1770_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. pre-approach\nC. transfer\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
contact" + } + ], + "images": [ + "images/task_00105_0_1324_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. both arms are active" + } + ], + "images": [ + "images/task_00105_0_752_camera_top.jpg", + "images/task_00105_0_755_camera_top.jpg", + "images/task_00105_0_758_camera_top.jpg", + "images/task_00105_0_761_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. transfer\nC. approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
transfer" + } + ], + "images": [ + "images/task_00105_0_1634_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. both arms are active" + } + ], + "images": [ + "images/task_00105_0_759_camera_top.jpg", + "images/task_00105_0_762_camera_top.jpg", + "images/task_00105_0_765_camera_top.jpg", + "images/task_00105_0_768_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
only the right arm is active" + } + ], + "images": [ + "images/task_00105_0_723_camera_top.jpg", + "images/task_00105_0_726_camera_top.jpg", + "images/task_00105_0_729_camera_top.jpg", + "images/task_00105_0_732_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. only the left arm is active" + } + ], + "images": [ + "images/task_00105_0_129_camera_top.jpg", + "images/task_00105_0_132_camera_top.jpg", + "images/task_00105_0_135_camera_top.jpg", + "images/task_00105_0_138_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
both arms are idle" + } + ], + "images": [ + "images/task_00105_0_888_camera_top.jpg", + "images/task_00105_0_891_camera_top.jpg", + "images/task_00105_0_894_camera_top.jpg", + "images/task_00105_0_897_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nC. toward the left side of the scene\nA. toward the right side of the scene\nB. toward the bottom of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. toward the left side of the scene" + } + ], + "images": [ + "images/task_00105_0_1757_camera_top.jpg", + "images/task_00105_0_1762_camera_top.jpg", + "images/task_00105_0_1767_camera_top.jpg", + "images/task_00105_0_1772_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the bottom of the scene\nB. toward the top of the scene\nC. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00105_0_1702_camera_top.jpg", + "images/task_00105_0_1707_camera_top.jpg", + "images/task_00105_0_1712_camera_top.jpg", + "images/task_00105_0_1717_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_00105_0_529_camera_top.jpg", + "images/task_00105_0_658_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00105_68_1017_camera_top.jpg", + "images/task_00105_68_1020_camera_top.jpg", + "images/task_00105_68_1023_camera_top.jpg", + "images/task_00105_68_1026_camera_top.jpg", + "images/task_00105_68_1029_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00105_68_509_camera_top.jpg", + "images/task_00105_68_512_camera_top.jpg", + "images/task_00105_68_515_camera_top.jpg", + "images/task_00105_68_518_camera_top.jpg", + "images/task_00105_68_521_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00105_68_528_camera_top.jpg", + "images/task_00105_68_531_camera_top.jpg", + "images/task_00105_68_534_camera_top.jpg", + "images/task_00105_68_537_camera_top.jpg", + "images/task_00105_68_540_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00105_68_995_camera_top_t-6.jpg", + "images/task_00105_68_998_camera_top_t-3.jpg", + "images/task_00105_68_1001_camera_top_t0.jpg", + "images/task_00105_68_1004_camera_top_t+3.jpg", + "images/task_00105_68_1007_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "C. only the right arm is active" + } + ], + "images": [ + "images/task_00105_68_1677_camera_top.jpg", + "images/task_00105_68_1680_camera_top.jpg", + "images/task_00105_68_1683_camera_top.jpg", + "images/task_00105_68_1686_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. only the left arm is active" + } + ], + "images": [ + "images/task_00105_68_289_camera_top.jpg", + "images/task_00105_68_292_camera_top.jpg", + "images/task_00105_68_295_camera_top.jpg", + "images/task_00105_68_298_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. pre-approach\nC. hold and carry\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
hold and carry" + } + ], + "images": [ + "images/task_00105_68_1233_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. release\nC. transfer\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. release" + } + ], + "images": [ + "images/task_00105_68_1715_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. contact\nC. hold and carry\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. release" + } + ], + "images": [ + "images/task_00105_68_1702_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. 
Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_00105_68_417_camera_top.jpg", + "images/task_00105_68_647_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the left side of the scene\nA. toward the right side of the scene\nC. toward the top of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. toward the left side of the scene" + } + ], + "images": [ + "images/task_00105_68_617_camera_top.jpg", + "images/task_00105_68_622_camera_top.jpg", + "images/task_00105_68_627_camera_top.jpg", + "images/task_00105_68_632_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
only the right arm is active" + } + ], + "images": [ + "images/task_00105_68_456_camera_top.jpg", + "images/task_00105_68_459_camera_top.jpg", + "images/task_00105_68_462_camera_top.jpg", + "images/task_00105_68_465_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nA. toward the left side of the scene\nB. toward the top of the scene\nC. toward the right side of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. toward the left side of the scene" + } + ], + "images": [ + "images/task_00105_68_616_camera_top.jpg", + "images/task_00105_68_621_camera_top.jpg", + "images/task_00105_68_626_camera_top.jpg", + "images/task_00105_68_631_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the left gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — no contact" + } + ], + "images": [ + "images/task_00105_68_616_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00105_68_917_camera_top_t-6.jpg", + "images/task_00105_68_920_camera_top_t-3.jpg", + "images/task_00105_68_923_camera_top_t0.jpg", + "images/task_00105_68_926_camera_top_t+3.jpg", + "images/task_00105_68_929_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
both arms are active" + } + ], + "images": [ + "images/task_00105_68_309_camera_top.jpg", + "images/task_00105_68_312_camera_top.jpg", + "images/task_00105_68_315_camera_top.jpg", + "images/task_00105_68_318_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00105_68_1485_camera_top_t-6.jpg", + "images/task_00105_68_1488_camera_top_t-3.jpg", + "images/task_00105_68_1491_camera_top_t0.jpg", + "images/task_00105_68_1494_camera_top_t+3.jpg", + "images/task_00105_68_1497_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
only the left arm is active" + } + ], + "images": [ + "images/task_00105_68_1472_camera_top.jpg", + "images/task_00105_68_1475_camera_top.jpg", + "images/task_00105_68_1478_camera_top.jpg", + "images/task_00105_68_1481_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00105_68_1411_camera_top.jpg", + "images/task_00105_68_1414_camera_top.jpg", + "images/task_00105_68_1417_camera_top.jpg", + "images/task_00105_68_1420_camera_top.jpg", + "images/task_00105_68_1423_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nC. toward the bottom of the scene\nA. toward the left side of the scene\nB. toward the right side of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
toward the bottom of the scene" + } + ], + "images": [ + "images/task_00105_68_1165_camera_top.jpg", + "images/task_00105_68_1170_camera_top.jpg", + "images/task_00105_68_1175_camera_top.jpg", + "images/task_00105_68_1180_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. only the right arm is active" + } + ], + "images": [ + "images/task_00105_68_1676_camera_top.jpg", + "images/task_00105_68_1679_camera_top.jpg", + "images/task_00105_68_1682_camera_top.jpg", + "images/task_00105_68_1685_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/task_00105_68_417_camera_top.jpg", + "images/task_00105_68_656_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the top of the scene\nB. toward the left side of the scene\nC. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. toward the right side of the scene" + } + ], + "images": [ + "images/task_00105_68_377_camera_top.jpg", + "images/task_00105_68_382_camera_top.jpg", + "images/task_00105_68_387_camera_top.jpg", + "images/task_00105_68_392_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00105_68_1283_camera_top.jpg", + "images/task_00105_68_1286_camera_top.jpg", + "images/task_00105_68_1289_camera_top.jpg", + "images/task_00105_68_1292_camera_top.jpg", + "images/task_00105_68_1295_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. only the left arm is active" + } + ], + "images": [ + "images/task_00105_68_166_camera_top.jpg", + "images/task_00105_68_169_camera_top.jpg", + "images/task_00105_68_172_camera_top.jpg", + "images/task_00105_68_175_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
only the right arm is active" + } + ], + "images": [ + "images/task_00105_68_1678_camera_top.jpg", + "images/task_00105_68_1681_camera_top.jpg", + "images/task_00105_68_1684_camera_top.jpg", + "images/task_00105_68_1687_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00105_68_1339_camera_top.jpg", + "images/task_00105_68_1342_camera_top.jpg", + "images/task_00105_68_1345_camera_top.jpg", + "images/task_00105_68_1348_camera_top.jpg", + "images/task_00105_68_1351_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
both arms are active" + } + ], + "images": [ + "images/task_00105_68_313_camera_top.jpg", + "images/task_00105_68_316_camera_top.jpg", + "images/task_00105_68_319_camera_top.jpg", + "images/task_00105_68_322_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00105_68_879_camera_top.jpg", + "images/task_00105_68_882_camera_top.jpg", + "images/task_00105_68_885_camera_top.jpg", + "images/task_00105_68_888_camera_top.jpg", + "images/task_00105_68_891_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_00105_68_669_camera_top.jpg", + "images/task_00105_68_1662_camera_top.jpg", + "images/task_00105_68_589_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_00105_68_1675_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_00105_68_414_camera_top.jpg", + "images/task_00105_68_1662_camera_top.jpg", + "images/task_00105_68_962_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nB. toward the top of the scene\nA. toward the right side of the scene\nC. 
toward the bottom of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. toward the top of the scene" + } + ], + "images": [ + "images/task_00105_68_1497_camera_top.jpg", + "images/task_00105_68_1502_camera_top.jpg", + "images/task_00105_68_1507_camera_top.jpg", + "images/task_00105_68_1512_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_00105_68_793_camera_top.jpg", + "images/task_00105_68_697_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
only the left arm is active" + } + ], + "images": [ + "images/task_00105_68_1273_camera_top.jpg", + "images/task_00105_68_1276_camera_top.jpg", + "images/task_00105_68_1279_camera_top.jpg", + "images/task_00105_68_1282_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. pre-approach\nC. hold and carry\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. pre-approach" + } + ], + "images": [ + "images/task_00105_68_376_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00105_68_1261_camera_top_t-6.jpg", + "images/task_00105_68_1258_camera_top_t-3.jpg", + "images/task_00105_68_1261_camera_top_t0.jpg", + "images/task_00105_68_1264_camera_top_t+3.jpg", + "images/task_00105_68_1267_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. only the left arm is active" + } + ], + "images": [ + "images/task_00105_68_1127_camera_top.jpg", + "images/task_00105_68_1130_camera_top.jpg", + "images/task_00105_68_1133_camera_top.jpg", + "images/task_00105_68_1136_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/task_00105_68_1230_camera_top.jpg", + "images/task_00105_68_1062_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the left gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/task_00105_68_1313_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00105_68_1265_camera_top_t-6.jpg", + "images/task_00105_68_1268_camera_top_t-3.jpg", + "images/task_00105_68_1271_camera_top_t0.jpg", + "images/task_00105_68_1274_camera_top_t+3.jpg", + "images/task_00105_68_1277_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00105_68_806_camera_top.jpg", + "images/task_00105_68_809_camera_top.jpg", + "images/task_00105_68_812_camera_top.jpg", + "images/task_00105_68_815_camera_top.jpg", + "images/task_00105_68_818_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. approach\nC. contact\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
contact" + } + ], + "images": [ + "images/task_00105_68_698_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. both arms are active" + } + ], + "images": [ + "images/task_00105_68_1502_camera_top.jpg", + "images/task_00105_68_1505_camera_top.jpg", + "images/task_00105_68_1508_camera_top.jpg", + "images/task_00105_68_1511_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
both arms are idle" + } + ], + "images": [ + "images/task_00105_68_239_camera_top.jpg", + "images/task_00105_68_242_camera_top.jpg", + "images/task_00105_68_245_camera_top.jpg", + "images/task_00105_68_248_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. both arms are idle" + } + ], + "images": [ + "images/task_00105_68_538_camera_top.jpg", + "images/task_00105_68_541_camera_top.jpg", + "images/task_00105_68_544_camera_top.jpg", + "images/task_00105_68_547_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_00105_68_655_camera_top.jpg", + "images/task_00105_68_412_camera_top.jpg", + "images/task_00105_68_548_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. both arms are idle" + } + ], + "images": [ + "images/task_00105_68_1610_camera_top.jpg", + "images/task_00105_68_1613_camera_top.jpg", + "images/task_00105_68_1616_camera_top.jpg", + "images/task_00105_68_1619_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
both arms are active" + } + ], + "images": [ + "images/task_00105_68_310_camera_top.jpg", + "images/task_00105_68_313_camera_top.jpg", + "images/task_00105_68_316_camera_top.jpg", + "images/task_00105_68_319_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nA. toward the left side of the scene\nB. toward the right side of the scene\nC. toward the bottom of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. toward the left side of the scene" + } + ], + "images": [ + "images/task_00105_68_1541_camera_top.jpg", + "images/task_00105_68_1546_camera_top.jpg", + "images/task_00105_68_1551_camera_top.jpg", + "images/task_00105_68_1556_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. contact\nC. approach\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
contact" + } + ], + "images": [ + "images/task_00105_68_417_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. both arms are active" + } + ], + "images": [ + "images/task_00105_68_1112_camera_top.jpg", + "images/task_00105_68_1115_camera_top.jpg", + "images/task_00105_68_1118_camera_top.jpg", + "images/task_00105_68_1121_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
both arms are active" + } + ], + "images": [ + "images/task_00105_68_311_camera_top.jpg", + "images/task_00105_68_314_camera_top.jpg", + "images/task_00105_68_317_camera_top.jpg", + "images/task_00105_68_320_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. both arms are idle" + } + ], + "images": [ + "images/task_00105_68_1383_camera_top.jpg", + "images/task_00105_68_1386_camera_top.jpg", + "images/task_00105_68_1389_camera_top.jpg", + "images/task_00105_68_1392_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/task_00105_68_638_camera_top.jpg", + "images/task_00105_68_733_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_00105_68_1232_camera_top.jpg", + "images/task_00105_68_616_camera_top.jpg", + "images/task_00105_68_575_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
both arms are active" + } + ], + "images": [ + "images/task_00105_68_314_camera_top.jpg", + "images/task_00105_68_317_camera_top.jpg", + "images/task_00105_68_320_camera_top.jpg", + "images/task_00105_68_323_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/task_00105_68_926_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
only the left arm is active" + } + ], + "images": [ + "images/task_00105_68_1272_camera_top.jpg", + "images/task_00105_68_1275_camera_top.jpg", + "images/task_00105_68_1278_camera_top.jpg", + "images/task_00105_68_1281_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. contact\nC. transfer\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. contact" + } + ], + "images": [ + "images/task_00105_68_1232_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. transfer\nC. contact\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. pre-approach" + } + ], + "images": [ + "images/task_00105_68_375_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. 
only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. only the right arm is active" + } + ], + "images": [ + "images/task_00105_68_354_camera_top.jpg", + "images/task_00105_68_357_camera_top.jpg", + "images/task_00105_68_360_camera_top.jpg", + "images/task_00105_68_363_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. both arms are idle" + } + ], + "images": [ + "images/task_00105_68_1189_camera_top.jpg", + "images/task_00105_68_1192_camera_top.jpg", + "images/task_00105_68_1195_camera_top.jpg", + "images/task_00105_68_1198_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
only the left arm is active" + } + ], + "images": [ + "images/task_00105_68_1274_camera_top.jpg", + "images/task_00105_68_1277_camera_top.jpg", + "images/task_00105_68_1280_camera_top.jpg", + "images/task_00105_68_1283_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nD. toward the left side of the scene\nA. toward the bottom of the scene\nB. toward the top of the scene\nC. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. toward the left side of the scene" + } + ], + "images": [ + "images/task_00105_68_1536_camera_top.jpg", + "images/task_00105_68_1541_camera_top.jpg", + "images/task_00105_68_1546_camera_top.jpg", + "images/task_00105_68_1551_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. release\nC. contact\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
approach" + } + ], + "images": [ + "images/task_00105_68_1145_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. only the left arm is active" + } + ], + "images": [ + "images/task_00105_68_1275_camera_top.jpg", + "images/task_00105_68_1278_camera_top.jpg", + "images/task_00105_68_1281_camera_top.jpg", + "images/task_00105_68_1284_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000068", + "recording_id": null, + "episode_id": 68, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00105_136_1452_camera_top.jpg", + "images/task_00105_136_1455_camera_top.jpg", + "images/task_00105_136_1458_camera_top.jpg", + "images/task_00105_136_1461_camera_top.jpg", + "images/task_00105_136_1464_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. both arms are idle" + } + ], + "images": [ + "images/task_00105_136_1349_camera_top.jpg", + "images/task_00105_136_1352_camera_top.jpg", + "images/task_00105_136_1355_camera_top.jpg", + "images/task_00105_136_1358_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00105_136_1222_camera_top.jpg", + "images/task_00105_136_1225_camera_top.jpg", + "images/task_00105_136_1228_camera_top.jpg", + "images/task_00105_136_1231_camera_top.jpg", + "images/task_00105_136_1234_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. only the left arm is active" + } + ], + "images": [ + "images/task_00105_136_1468_camera_top.jpg", + "images/task_00105_136_1471_camera_top.jpg", + "images/task_00105_136_1474_camera_top.jpg", + "images/task_00105_136_1477_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_00105_136_355_camera_top.jpg", + "images/task_00105_136_811_camera_top.jpg", + "images/task_00105_136_1300_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nC. toward the right side of the scene\nA. toward the left side of the scene\nB. toward the top of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. toward the right side of the scene" + } + ], + "images": [ + "images/task_00105_136_1017_camera_top.jpg", + "images/task_00105_136_1022_camera_top.jpg", + "images/task_00105_136_1027_camera_top.jpg", + "images/task_00105_136_1032_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00105_136_622_camera_top.jpg", + "images/task_00105_136_625_camera_top.jpg", + "images/task_00105_136_628_camera_top.jpg", + "images/task_00105_136_631_camera_top.jpg", + "images/task_00105_136_634_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00105_136_1129_camera_top_t-6.jpg", + "images/task_00105_136_1132_camera_top_t-3.jpg", + "images/task_00105_136_1135_camera_top_t0.jpg", + "images/task_00105_136_1138_camera_top_t+3.jpg", + "images/task_00105_136_1141_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nA. toward the right side of the scene\nB. toward the bottom of the scene\nC. toward the top of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00105_136_1018_camera_top.jpg", + "images/task_00105_136_1023_camera_top.jpg", + "images/task_00105_136_1028_camera_top.jpg", + "images/task_00105_136_1033_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. only the right arm is active" + } + ], + "images": [ + "images/task_00105_136_189_camera_top.jpg", + "images/task_00105_136_192_camera_top.jpg", + "images/task_00105_136_195_camera_top.jpg", + "images/task_00105_136_198_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. hold and carry\nC. pre-approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
transfer" + } + ], + "images": [ + "images/task_00105_136_1313_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. only the right arm is active" + } + ], + "images": [ + "images/task_00105_136_419_camera_top.jpg", + "images/task_00105_136_422_camera_top.jpg", + "images/task_00105_136_425_camera_top.jpg", + "images/task_00105_136_428_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. transfer\nC. contact\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
hold and carry" + } + ], + "images": [ + "images/task_00105_136_1468_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_00105_136_523_camera_top.jpg", + "images/task_00105_136_302_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
both arms are idle" + } + ], + "images": [ + "images/task_00105_136_795_camera_top.jpg", + "images/task_00105_136_798_camera_top.jpg", + "images/task_00105_136_801_camera_top.jpg", + "images/task_00105_136_804_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. contact\nC. release\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. contact" + } + ], + "images": [ + "images/task_00105_136_174_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. contact\nC. release\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. hold and carry" + } + ], + "images": [ + "images/task_00105_136_186_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the left gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. 
No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/task_00105_136_1389_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. only the right arm is active" + } + ], + "images": [ + "images/task_00105_136_570_camera_top.jpg", + "images/task_00105_136_573_camera_top.jpg", + "images/task_00105_136_576_camera_top.jpg", + "images/task_00105_136_579_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
only the right arm is active" + } + ], + "images": [ + "images/task_00105_136_1019_camera_top.jpg", + "images/task_00105_136_1022_camera_top.jpg", + "images/task_00105_136_1025_camera_top.jpg", + "images/task_00105_136_1028_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. only the left arm is active" + } + ], + "images": [ + "images/task_00105_136_1464_camera_top.jpg", + "images/task_00105_136_1467_camera_top.jpg", + "images/task_00105_136_1470_camera_top.jpg", + "images/task_00105_136_1473_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/task_00105_136_972_camera_top.jpg", + "images/task_00105_136_1192_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00105_136_925_camera_top.jpg", + "images/task_00105_136_928_camera_top.jpg", + "images/task_00105_136_931_camera_top.jpg", + "images/task_00105_136_934_camera_top.jpg", + "images/task_00105_136_937_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. approach\nC. release\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
approach" + } + ], + "images": [ + "images/task_00105_136_1175_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_00105_136_792_camera_top.jpg", + "images/task_00105_136_1496_camera_top.jpg", + "images/task_00105_136_720_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
only the left arm is active" + } + ], + "images": [ + "images/task_00105_136_1466_camera_top.jpg", + "images/task_00105_136_1469_camera_top.jpg", + "images/task_00105_136_1472_camera_top.jpg", + "images/task_00105_136_1475_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. both arms are idle" + } + ], + "images": [ + "images/task_00105_136_491_camera_top.jpg", + "images/task_00105_136_494_camera_top.jpg", + "images/task_00105_136_497_camera_top.jpg", + "images/task_00105_136_500_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
only the left arm is active" + } + ], + "images": [ + "images/task_00105_136_1469_camera_top.jpg", + "images/task_00105_136_1472_camera_top.jpg", + "images/task_00105_136_1475_camera_top.jpg", + "images/task_00105_136_1478_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. both arms are idle" + } + ], + "images": [ + "images/task_00105_136_1182_camera_top.jpg", + "images/task_00105_136_1185_camera_top.jpg", + "images/task_00105_136_1188_camera_top.jpg", + "images/task_00105_136_1191_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
both arms are idle" + } + ], + "images": [ + "images/task_00105_136_98_camera_top.jpg", + "images/task_00105_136_101_camera_top.jpg", + "images/task_00105_136_104_camera_top.jpg", + "images/task_00105_136_107_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/task_00105_136_380_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
both arms are idle" + } + ], + "images": [ + "images/task_00105_136_265_camera_top.jpg", + "images/task_00105_136_268_camera_top.jpg", + "images/task_00105_136_271_camera_top.jpg", + "images/task_00105_136_274_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. only the right arm is active" + } + ], + "images": [ + "images/task_00105_136_1082_camera_top.jpg", + "images/task_00105_136_1085_camera_top.jpg", + "images/task_00105_136_1088_camera_top.jpg", + "images/task_00105_136_1091_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
only the left arm is active" + } + ], + "images": [ + "images/task_00105_136_1227_camera_top.jpg", + "images/task_00105_136_1230_camera_top.jpg", + "images/task_00105_136_1233_camera_top.jpg", + "images/task_00105_136_1236_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00105_136_236_camera_top.jpg", + "images/task_00105_136_239_camera_top.jpg", + "images/task_00105_136_242_camera_top.jpg", + "images/task_00105_136_245_camera_top.jpg", + "images/task_00105_136_248_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nA. toward the bottom of the scene\nB. toward the top of the scene\nC. toward the left side of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
toward the bottom of the scene" + } + ], + "images": [ + "images/task_00105_136_1095_camera_top.jpg", + "images/task_00105_136_1100_camera_top.jpg", + "images/task_00105_136_1105_camera_top.jpg", + "images/task_00105_136_1110_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00105_136_193_camera_top.jpg", + "images/task_00105_136_196_camera_top.jpg", + "images/task_00105_136_199_camera_top.jpg", + "images/task_00105_136_202_camera_top.jpg", + "images/task_00105_136_205_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00105_136_224_camera_top.jpg", + "images/task_00105_136_227_camera_top.jpg", + "images/task_00105_136_230_camera_top.jpg", + "images/task_00105_136_233_camera_top.jpg", + "images/task_00105_136_236_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_00105_136_638_camera_top.jpg", + "images/task_00105_136_810_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. contact\nC. hold and carry\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
contact" + } + ], + "images": [ + "images/task_00105_136_521_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nC. toward the right side of the scene\nA. toward the top of the scene\nB. toward the bottom of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. toward the right side of the scene" + } + ], + "images": [ + "images/task_00105_136_1074_camera_top.jpg", + "images/task_00105_136_1079_camera_top.jpg", + "images/task_00105_136_1084_camera_top.jpg", + "images/task_00105_136_1089_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00105_136_658_camera_top.jpg", + "images/task_00105_136_661_camera_top.jpg", + "images/task_00105_136_664_camera_top.jpg", + "images/task_00105_136_667_camera_top.jpg", + "images/task_00105_136_670_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nC. toward the bottom of the scene\nA. toward the left side of the scene\nB. toward the right side of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. toward the bottom of the scene" + } + ], + "images": [ + "images/task_00105_136_1097_camera_top.jpg", + "images/task_00105_136_1102_camera_top.jpg", + "images/task_00105_136_1107_camera_top.jpg", + "images/task_00105_136_1112_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the left side of the scene\nA. toward the top of the scene\nB. toward the right side of the scene\nC. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
toward the left side of the scene" + } + ], + "images": [ + "images/task_00105_136_1038_camera_top.jpg", + "images/task_00105_136_1043_camera_top.jpg", + "images/task_00105_136_1048_camera_top.jpg", + "images/task_00105_136_1053_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. only the right arm is active" + } + ], + "images": [ + "images/task_00105_136_135_camera_top.jpg", + "images/task_00105_136_138_camera_top.jpg", + "images/task_00105_136_141_camera_top.jpg", + "images/task_00105_136_144_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00105_136_669_camera_top_t-6.jpg", + "images/task_00105_136_672_camera_top_t-3.jpg", + "images/task_00105_136_675_camera_top_t0.jpg", + "images/task_00105_136_678_camera_top_t+3.jpg", + "images/task_00105_136_681_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. contact\nC. transfer\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. hold and carry" + } + ], + "images": [ + "images/task_00105_136_1253_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the left gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — no contact" + } + ], + "images": [ + "images/task_00105_136_52_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00105_136_838_camera_top.jpg", + "images/task_00105_136_841_camera_top.jpg", + "images/task_00105_136_844_camera_top.jpg", + "images/task_00105_136_847_camera_top.jpg", + "images/task_00105_136_850_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — contact established" + } + ], + "images": [ + "images/task_00105_136_643_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_00105_136_1507_camera_top.jpg", + "images/task_00105_136_560_camera_top.jpg", + "images/task_00105_136_458_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00105_136_890_camera_top.jpg", + "images/task_00105_136_893_camera_top.jpg", + "images/task_00105_136_896_camera_top.jpg", + "images/task_00105_136_899_camera_top.jpg", + "images/task_00105_136_902_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. release\nC. approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. release" + } + ], + "images": [ + "images/task_00105_136_1541_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nA. toward the right side of the scene\nB. toward the bottom of the scene\nC. toward the top of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00105_136_1076_camera_top.jpg", + "images/task_00105_136_1081_camera_top.jpg", + "images/task_00105_136_1086_camera_top.jpg", + "images/task_00105_136_1091_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the top of the scene\nB. toward the bottom of the scene\nC. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. toward the right side of the scene" + } + ], + "images": [ + "images/task_00105_136_1218_camera_top.jpg", + "images/task_00105_136_1223_camera_top.jpg", + "images/task_00105_136_1228_camera_top.jpg", + "images/task_00105_136_1233_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the left gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — no contact" + } + ], + "images": [ + "images/task_00105_136_460_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. only the left arm is active" + } + ], + "images": [ + "images/task_00105_136_1463_camera_top.jpg", + "images/task_00105_136_1466_camera_top.jpg", + "images/task_00105_136_1469_camera_top.jpg", + "images/task_00105_136_1472_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00105_136_1288_camera_top.jpg", + "images/task_00105_136_1291_camera_top.jpg", + "images/task_00105_136_1294_camera_top.jpg", + "images/task_00105_136_1297_camera_top.jpg", + "images/task_00105_136_1300_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00105_136_571_camera_top.jpg", + "images/task_00105_136_574_camera_top.jpg", + "images/task_00105_136_577_camera_top.jpg", + "images/task_00105_136_580_camera_top.jpg", + "images/task_00105_136_583_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
both arms are idle" + } + ], + "images": [ + "images/task_00105_136_794_camera_top.jpg", + "images/task_00105_136_797_camera_top.jpg", + "images/task_00105_136_800_camera_top.jpg", + "images/task_00105_136_803_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00105_136_1446_camera_top_t-6.jpg", + "images/task_00105_136_1449_camera_top_t-3.jpg", + "images/task_00105_136_1452_camera_top_t0.jpg", + "images/task_00105_136_1455_camera_top_t+3.jpg", + "images/task_00105_136_1458_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. transfer\nC. release\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
transfer" + } + ], + "images": [ + "images/task_00105_136_1297_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/task_00105_136_1231_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00105_136_1387_camera_top.jpg", + "images/task_00105_136_1390_camera_top.jpg", + "images/task_00105_136_1393_camera_top.jpg", + "images/task_00105_136_1396_camera_top.jpg", + "images/task_00105_136_1399_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. approach\nC. hold and carry\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. hold and carry" + } + ], + "images": [ + "images/task_00105_136_1224_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_00105_136_459_camera_top.jpg", + "images/task_00105_136_615_camera_top.jpg", + "images/task_00105_136_1508_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00105_136_181_camera_top_t-6.jpg", + "images/task_00105_136_184_camera_top_t-3.jpg", + "images/task_00105_136_187_camera_top_t0.jpg", + "images/task_00105_136_190_camera_top_t+3.jpg", + "images/task_00105_136_193_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. contact\nC. pre-approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
contact" + } + ], + "images": [ + "images/task_00105_136_177_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. both arms are active\nB. only the left arm is active\nC. only the right arm is active\nD. both arms are idle\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. only the right arm is active" + } + ], + "images": [ + "images/task_00105_136_351_camera_top.jpg", + "images/task_00105_136_354_camera_top.jpg", + "images/task_00105_136_357_camera_top.jpg", + "images/task_00105_136_360_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T4", + "task_id": "task_00105", + "split": "eval", + "split_group_id": "task_00105__episode_000136", + "recording_id": null, + "episode_id": 136, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the left side of the scene\nA. toward the top of the scene\nC. toward the bottom of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
toward the left side of the scene" + } + ], + "images": [ + "images/task_00106_0_426_camera_top.jpg", + "images/task_00106_0_431_camera_top.jpg", + "images/task_00106_0_436_camera_top.jpg", + "images/task_00106_0_441_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00106", + "split": "eval", + "split_group_id": "task_00106__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nA. toward the left side of the scene\nB. toward the right side of the scene\nC. toward the bottom of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. toward the left side of the scene" + } + ], + "images": [ + "images/task_00106_0_427_camera_top.jpg", + "images/task_00106_0_432_camera_top.jpg", + "images/task_00106_0_437_camera_top.jpg", + "images/task_00106_0_442_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00106", + "split": "eval", + "split_group_id": "task_00106__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/task_00106_0_91_camera_top.jpg", + "images/task_00106_0_235_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00106", + "split": "eval", + "split_group_id": "task_00106__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00106_0_287_camera_top_t-6.jpg", + "images/task_00106_0_290_camera_top_t-3.jpg", + "images/task_00106_0_293_camera_top_t0.jpg", + "images/task_00106_0_296_camera_top_t+3.jpg", + "images/task_00106_0_299_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00106", + "split": "eval", + "split_group_id": "task_00106__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. approach\nC. pre-approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
release" + } + ], + "images": [ + "images/task_00106_0_1877_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00106", + "split": "eval", + "split_group_id": "task_00106__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nC. toward the right side of the scene\nA. toward the left side of the scene\nB. toward the bottom of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. toward the right side of the scene" + } + ], + "images": [ + "images/task_00106_0_1723_camera_top.jpg", + "images/task_00106_0_1728_camera_top.jpg", + "images/task_00106_0_1733_camera_top.jpg", + "images/task_00106_0_1738_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00106", + "split": "eval", + "split_group_id": "task_00106__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the right side of the scene\nA. toward the top of the scene\nC. toward the bottom of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00106_0_1730_camera_top.jpg", + "images/task_00106_0_1735_camera_top.jpg", + "images/task_00106_0_1740_camera_top.jpg", + "images/task_00106_0_1745_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00106", + "split": "eval", + "split_group_id": "task_00106__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the right side of the scene\nA. toward the top of the scene\nC. toward the bottom of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. toward the right side of the scene" + } + ], + "images": [ + "images/task_00106_0_1726_camera_top.jpg", + "images/task_00106_0_1731_camera_top.jpg", + "images/task_00106_0_1736_camera_top.jpg", + "images/task_00106_0_1741_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00106", + "split": "eval", + "split_group_id": "task_00106__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00106_0_1748_camera_top_t-6.jpg", + "images/task_00106_0_1751_camera_top_t-3.jpg", + "images/task_00106_0_1754_camera_top_t0.jpg", + "images/task_00106_0_1757_camera_top_t+3.jpg", + "images/task_00106_0_1760_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00106", + "split": "eval", + "split_group_id": "task_00106__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. approach\nC. pre-approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. approach" + } + ], + "images": [ + "images/task_00106_0_85_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00106", + "split": "eval", + "split_group_id": "task_00106__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. release\nC. approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
release" + } + ], + "images": [ + "images/task_00106_0_1839_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00106", + "split": "eval", + "split_group_id": "task_00106__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the bottom of the scene\nB. toward the left side of the scene\nC. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. toward the right side of the scene" + } + ], + "images": [ + "images/task_00106_0_1721_camera_top.jpg", + "images/task_00106_0_1726_camera_top.jpg", + "images/task_00106_0_1731_camera_top.jpg", + "images/task_00106_0_1736_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00106", + "split": "eval", + "split_group_id": "task_00106__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00106_0_1567_camera_top_t-6.jpg", + "images/task_00106_0_1570_camera_top_t-3.jpg", + "images/task_00106_0_1573_camera_top_t0.jpg", + "images/task_00106_0_1576_camera_top_t+3.jpg", + "images/task_00106_0_1579_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00106", + "split": "eval", + "split_group_id": "task_00106__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. release\nC. pre-approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. transfer" + } + ], + "images": [ + "images/task_00106_0_927_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00106", + "split": "eval", + "split_group_id": "task_00106__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. approach\nC. transfer\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
pre-approach" + } + ], + "images": [ + "images/task_00106_0_26_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00106", + "split": "eval", + "split_group_id": "task_00106__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00106_0_918_camera_top_t-6.jpg", + "images/task_00106_0_921_camera_top_t-3.jpg", + "images/task_00106_0_924_camera_top_t0.jpg", + "images/task_00106_0_927_camera_top_t+3.jpg", + "images/task_00106_0_930_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00106", + "split": "eval", + "split_group_id": "task_00106__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00106_0_502_camera_top_t-6.jpg", + "images/task_00106_0_505_camera_top_t-3.jpg", + "images/task_00106_0_508_camera_top_t0.jpg", + "images/task_00106_0_511_camera_top_t+3.jpg", + "images/task_00106_0_514_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00106", + "split": "eval", + "split_group_id": "task_00106__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00106_0_1568_camera_top_t-6.jpg", + "images/task_00106_0_1571_camera_top_t-3.jpg", + "images/task_00106_0_1574_camera_top_t0.jpg", + "images/task_00106_0_1577_camera_top_t+3.jpg", + "images/task_00106_0_1580_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00106", + "split": "eval", + "split_group_id": "task_00106__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nA. toward the right side of the scene\nB. toward the left side of the scene\nC. toward the top of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00106_0_1728_camera_top.jpg", + "images/task_00106_0_1733_camera_top.jpg", + "images/task_00106_0_1738_camera_top.jpg", + "images/task_00106_0_1743_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00106", + "split": "eval", + "split_group_id": "task_00106__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/task_00106_168_562_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00106", + "split": "eval", + "split_group_id": "task_00106__episode_000168", + "recording_id": null, + "episode_id": 168, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the right side of the scene\nA. toward the top of the scene\nC. toward the left side of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00106_168_907_camera_top.jpg", + "images/task_00106_168_912_camera_top.jpg", + "images/task_00106_168_917_camera_top.jpg", + "images/task_00106_168_922_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00106", + "split": "eval", + "split_group_id": "task_00106__episode_000168", + "recording_id": null, + "episode_id": 168, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/task_00106_168_511_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00106", + "split": "eval", + "split_group_id": "task_00106__episode_000168", + "recording_id": null, + "episode_id": 168, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. transfer\nC. hold and carry\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. hold and carry" + } + ], + "images": [ + "images/task_00106_168_786_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00106", + "split": "eval", + "split_group_id": "task_00106__episode_000168", + "recording_id": null, + "episode_id": 168, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00106_168_815_camera_top.jpg", + "images/task_00106_168_818_camera_top.jpg", + "images/task_00106_168_821_camera_top.jpg", + "images/task_00106_168_824_camera_top.jpg", + "images/task_00106_168_827_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00106", + "split": "eval", + "split_group_id": "task_00106__episode_000168", + "recording_id": null, + "episode_id": 168, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. contact\nC. approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. transfer" + } + ], + "images": [ + "images/task_00106_168_172_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00106", + "split": "eval", + "split_group_id": "task_00106__episode_000168", + "recording_id": null, + "episode_id": 168, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00106_168_402_camera_top.jpg", + "images/task_00106_168_405_camera_top.jpg", + "images/task_00106_168_408_camera_top.jpg", + "images/task_00106_168_411_camera_top.jpg", + "images/task_00106_168_414_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00106", + "split": "eval", + "split_group_id": "task_00106__episode_000168", + "recording_id": null, + "episode_id": 168, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. contact\nC. transfer\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. approach" + } + ], + "images": [ + "images/task_00106_168_636_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00106", + "split": "eval", + "split_group_id": "task_00106__episode_000168", + "recording_id": null, + "episode_id": 168, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the left side of the scene\nA. toward the right side of the scene\nB. toward the top of the scene\nC. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
toward the left side of the scene" + } + ], + "images": [ + "images/task_00106_168_582_camera_top.jpg", + "images/task_00106_168_587_camera_top.jpg", + "images/task_00106_168_592_camera_top.jpg", + "images/task_00106_168_597_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00106", + "split": "eval", + "split_group_id": "task_00106__episode_000168", + "recording_id": null, + "episode_id": 168, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_00106_168_713_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00106", + "split": "eval", + "split_group_id": "task_00106__episode_000168", + "recording_id": null, + "episode_id": 168, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. transfer\nC. release\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. approach" + } + ], + "images": [ + "images/task_00106_168_709_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00106", + "split": "eval", + "split_group_id": "task_00106__episode_000168", + "recording_id": null, + "episode_id": 168, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. 
The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_00106_168_566_camera_top.jpg", + "images/task_00106_168_634_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00106", + "split": "eval", + "split_group_id": "task_00106__episode_000168", + "recording_id": null, + "episode_id": 168, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_00106_168_400_camera_top.jpg", + "images/task_00106_168_526_camera_top.jpg", + "images/task_00106_168_635_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00106", + "split": "eval", + "split_group_id": "task_00106__episode_000168", + "recording_id": null, + "episode_id": 168, + "camera": "camera_top", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00106_168_799_camera_top.jpg", + "images/task_00106_168_802_camera_top.jpg", + "images/task_00106_168_805_camera_top.jpg", + "images/task_00106_168_808_camera_top.jpg", + "images/task_00106_168_811_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00106", + "split": "eval", + "split_group_id": "task_00106__episode_000168", + "recording_id": null, + "episode_id": 168, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. transfer\nC. approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. contact" + } + ], + "images": [ + "images/task_00106_168_610_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00106", + "split": "eval", + "split_group_id": "task_00106__episode_000168", + "recording_id": null, + "episode_id": 168, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nC. toward the right side of the scene\nA. toward the left side of the scene\nB. toward the bottom of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00106_168_832_camera_top.jpg", + "images/task_00106_168_837_camera_top.jpg", + "images/task_00106_168_842_camera_top.jpg", + "images/task_00106_168_847_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00106", + "split": "eval", + "split_group_id": "task_00106__episode_000168", + "recording_id": null, + "episode_id": 168, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00106_168_425_camera_top_t-6.jpg", + "images/task_00106_168_428_camera_top_t-3.jpg", + "images/task_00106_168_431_camera_top_t0.jpg", + "images/task_00106_168_434_camera_top_t+3.jpg", + "images/task_00106_168_437_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00106", + "split": "eval", + "split_group_id": "task_00106__episode_000168", + "recording_id": null, + "episode_id": 168, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. approach\nC. release\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
contact" + } + ], + "images": [ + "images/task_00106_168_466_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00106", + "split": "eval", + "split_group_id": "task_00106__episode_000168", + "recording_id": null, + "episode_id": 168, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. contact\nC. approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. hold and carry" + } + ], + "images": [ + "images/task_00106_168_807_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00106", + "split": "eval", + "split_group_id": "task_00106__episode_000168", + "recording_id": null, + "episode_id": 168, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. release\nC. approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. contact" + } + ], + "images": [ + "images/task_00106_168_469_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00106", + "split": "eval", + "split_group_id": "task_00106__episode_000168", + "recording_id": null, + "episode_id": 168, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_00106_168_758_camera_top.jpg", + "images/task_00106_168_688_camera_top.jpg", + "images/task_00106_168_806_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00106", + "split": "eval", + "split_group_id": "task_00106__episode_000168", + "recording_id": null, + "episode_id": 168, + "camera": "camera_top", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/task_00106_168_291_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00106", + "split": "eval", + "split_group_id": "task_00106__episode_000168", + "recording_id": null, + "episode_id": 168, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the right side of the scene\nA. toward the top of the scene\nC. toward the bottom of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00106_168_595_camera_top.jpg", + "images/task_00106_168_600_camera_top.jpg", + "images/task_00106_168_605_camera_top.jpg", + "images/task_00106_168_610_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00106", + "split": "eval", + "split_group_id": "task_00106__episode_000168", + "recording_id": null, + "episode_id": 168, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nA. toward the left side of the scene\nB. toward the top of the scene\nC. toward the right side of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. toward the left side of the scene" + } + ], + "images": [ + "images/task_00106_168_579_camera_top.jpg", + "images/task_00106_168_584_camera_top.jpg", + "images/task_00106_168_589_camera_top.jpg", + "images/task_00106_168_594_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00106", + "split": "eval", + "split_group_id": "task_00106__episode_000168", + "recording_id": null, + "episode_id": 168, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. hold and carry\nC. approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
approach" + } + ], + "images": [ + "images/task_00106_168_730_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00106", + "split": "eval", + "split_group_id": "task_00106__episode_000168", + "recording_id": null, + "episode_id": 168, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/task_00106_168_494_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00106", + "split": "eval", + "split_group_id": "task_00106__episode_000168", + "recording_id": null, + "episode_id": 168, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/task_00106_168_543_camera_top.jpg", + "images/task_00106_168_624_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00106", + "split": "eval", + "split_group_id": "task_00106__episode_000168", + "recording_id": null, + "episode_id": 168, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00106_168_496_camera_top_t-6.jpg", + "images/task_00106_168_499_camera_top_t-3.jpg", + "images/task_00106_168_502_camera_top_t0.jpg", + "images/task_00106_168_505_camera_top_t+3.jpg", + "images/task_00106_168_508_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00106", + "split": "eval", + "split_group_id": "task_00106__episode_000168", + "recording_id": null, + "episode_id": 168, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. contact\nC. transfer\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
transfer" + } + ], + "images": [ + "images/task_00106_168_286_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00106", + "split": "eval", + "split_group_id": "task_00106__episode_000168", + "recording_id": null, + "episode_id": 168, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00106_168_300_camera_top.jpg", + "images/task_00106_168_303_camera_top.jpg", + "images/task_00106_168_306_camera_top.jpg", + "images/task_00106_168_309_camera_top.jpg", + "images/task_00106_168_312_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00106", + "split": "eval", + "split_group_id": "task_00106__episode_000168", + "recording_id": null, + "episode_id": 168, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. hold and carry\nC. release\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
hold and carry" + } + ], + "images": [ + "images/task_00106_168_538_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00106", + "split": "eval", + "split_group_id": "task_00106__episode_000168", + "recording_id": null, + "episode_id": 168, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. transfer\nC. hold and carry\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. hold and carry" + } + ], + "images": [ + "images/task_00106_168_798_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00106", + "split": "eval", + "split_group_id": "task_00106__episode_000168", + "recording_id": null, + "episode_id": 168, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/task_00106_168_610_camera_top.jpg", + "images/task_00106_168_681_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00106", + "split": "eval", + "split_group_id": "task_00106__episode_000168", + "recording_id": null, + "episode_id": 168, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00106_168_115_camera_top.jpg", + "images/task_00106_168_118_camera_top.jpg", + "images/task_00106_168_121_camera_top.jpg", + "images/task_00106_168_124_camera_top.jpg", + "images/task_00106_168_127_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00106", + "split": "eval", + "split_group_id": "task_00106__episode_000168", + "recording_id": null, + "episode_id": 168, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — contact established" + } + ], + "images": [ + "images/task_00106_168_815_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00106", + "split": "eval", + "split_group_id": "task_00106__episode_000168", + "recording_id": null, + "episode_id": 168, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. contact\nC. transfer\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. release" + } + ], + "images": [ + "images/task_00106_168_903_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00106", + "split": "eval", + "split_group_id": "task_00106__episode_000168", + "recording_id": null, + "episode_id": 168, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/task_00106_168_1084_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00106", + "split": "eval", + "split_group_id": "task_00106__episode_000168", + "recording_id": null, + "episode_id": 168, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00106_168_938_camera_top_t-6.jpg", + "images/task_00106_168_941_camera_top_t-3.jpg", + "images/task_00106_168_944_camera_top_t0.jpg", + "images/task_00106_168_947_camera_top_t+3.jpg", + "images/task_00106_168_950_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00106", + "split": "eval", + "split_group_id": "task_00106__episode_000168", + "recording_id": null, + "episode_id": 168, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00106_168_775_camera_top.jpg", + "images/task_00106_168_778_camera_top.jpg", + "images/task_00106_168_781_camera_top.jpg", + "images/task_00106_168_784_camera_top.jpg", + "images/task_00106_168_787_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00106", + "split": "eval", + "split_group_id": "task_00106__episode_000168", + "recording_id": null, + "episode_id": 168, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00106_168_778_camera_top_t-6.jpg", + "images/task_00106_168_781_camera_top_t-3.jpg", + "images/task_00106_168_784_camera_top_t0.jpg", + "images/task_00106_168_787_camera_top_t+3.jpg", + "images/task_00106_168_790_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00106", + "split": "eval", + "split_group_id": "task_00106__episode_000168", + "recording_id": null, + "episode_id": 168, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_00106_168_738_camera_top.jpg", + "images/task_00106_168_610_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00106", + "split": "eval", + "split_group_id": "task_00106__episode_000168", + "recording_id": null, + "episode_id": 168, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the bottom of the scene\nB. toward the top of the scene\nC. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00106_168_831_camera_top.jpg", + "images/task_00106_168_836_camera_top.jpg", + "images/task_00106_168_841_camera_top.jpg", + "images/task_00106_168_846_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00106", + "split": "eval", + "split_group_id": "task_00106__episode_000168", + "recording_id": null, + "episode_id": 168, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nA. toward the left side of the scene\nB. toward the bottom of the scene\nC. toward the right side of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. toward the left side of the scene" + } + ], + "images": [ + "images/task_00106_168_581_camera_top.jpg", + "images/task_00106_168_586_camera_top.jpg", + "images/task_00106_168_591_camera_top.jpg", + "images/task_00106_168_596_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00106", + "split": "eval", + "split_group_id": "task_00106__episode_000168", + "recording_id": null, + "episode_id": 168, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/task_00107_0_574_camera_top.jpg", + "images/task_00107_0_370_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00107_0_353_camera_top.jpg", + "images/task_00107_0_356_camera_top.jpg", + "images/task_00107_0_359_camera_top.jpg", + "images/task_00107_0_362_camera_top.jpg", + "images/task_00107_0_365_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nA. toward the bottom of the scene\nB. toward the left side of the scene\nC. toward the right side of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
toward the bottom of the scene" + } + ], + "images": [ + "images/task_00107_0_422_camera_top.jpg", + "images/task_00107_0_427_camera_top.jpg", + "images/task_00107_0_432_camera_top.jpg", + "images/task_00107_0_437_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/task_00107_0_648_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. approach\nC. release\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. pre-approach" + } + ], + "images": [ + "images/task_00107_0_108_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nC. toward the bottom of the scene\nA. 
toward the top of the scene\nB. toward the right side of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. toward the bottom of the scene" + } + ], + "images": [ + "images/task_00107_0_419_camera_top.jpg", + "images/task_00107_0_424_camera_top.jpg", + "images/task_00107_0_429_camera_top.jpg", + "images/task_00107_0_434_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. release\nC. approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. approach" + } + ], + "images": [ + "images/task_00107_0_128_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_00107_0_145_camera_top.jpg", + "images/task_00107_0_317_camera_top.jpg", + "images/task_00107_0_525_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00107_0_442_camera_top.jpg", + "images/task_00107_0_445_camera_top.jpg", + "images/task_00107_0_448_camera_top.jpg", + "images/task_00107_0_451_camera_top.jpg", + "images/task_00107_0_454_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_00107_0_526_camera_top.jpg", + "images/task_00107_0_788_camera_top.jpg", + "images/task_00107_0_141_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. transfer\nC. pre-approach\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. contact" + } + ], + "images": [ + "images/task_00107_0_150_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nC. toward the top of the scene\nA. toward the left side of the scene\nB. toward the bottom of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
toward the top of the scene" + } + ], + "images": [ + "images/task_00107_0_646_camera_top.jpg", + "images/task_00107_0_651_camera_top.jpg", + "images/task_00107_0_656_camera_top.jpg", + "images/task_00107_0_661_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. release\nC. contact\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. release" + } + ], + "images": [ + "images/task_00107_0_876_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00107_0_504_camera_top_t-6.jpg", + "images/task_00107_0_507_camera_top_t-3.jpg", + "images/task_00107_0_510_camera_top_t0.jpg", + "images/task_00107_0_513_camera_top_t+3.jpg", + "images/task_00107_0_516_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. release\nC. transfer\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. pre-approach" + } + ], + "images": [ + "images/task_00107_0_117_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. transfer\nC. hold and carry\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
transfer" + } + ], + "images": [ + "images/task_00107_0_686_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. hold and carry\nC. approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. approach" + } + ], + "images": [ + "images/task_00107_0_123_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. hold and carry\nC. release\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. contact" + } + ], + "images": [ + "images/task_00107_0_152_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — no contact" + } + ], + "images": [ + "images/task_00107_0_55_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_00107_0_135_camera_top.jpg", + "images/task_00107_0_553_camera_top.jpg", + "images/task_00107_0_810_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/task_00107_0_495_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00107_0_394_camera_top_t-6.jpg", + "images/task_00107_0_397_camera_top_t-3.jpg", + "images/task_00107_0_400_camera_top_t0.jpg", + "images/task_00107_0_403_camera_top_t+3.jpg", + "images/task_00107_0_406_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. pre-approach\nC. transfer\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. release" + } + ], + "images": [ + "images/task_00107_0_864_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/task_00107_0_305_camera_top.jpg", + "images/task_00107_0_126_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nA. toward the bottom of the scene\nB. toward the top of the scene\nC. toward the left side of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. toward the bottom of the scene" + } + ], + "images": [ + "images/task_00107_0_420_camera_top.jpg", + "images/task_00107_0_425_camera_top.jpg", + "images/task_00107_0_430_camera_top.jpg", + "images/task_00107_0_435_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. transfer\nC. pre-approach\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
approach" + } + ], + "images": [ + "images/task_00107_0_122_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. approach\nC. release\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. approach" + } + ], + "images": [ + "images/task_00107_0_141_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/task_00107_0_140_camera_top.jpg", + "images/task_00107_0_252_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00107_0_783_camera_top_t-6.jpg", + "images/task_00107_0_786_camera_top_t-3.jpg", + "images/task_00107_0_789_camera_top_t0.jpg", + "images/task_00107_0_792_camera_top_t+3.jpg", + "images/task_00107_0_795_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nB. toward the top of the scene\nA. toward the right side of the scene\nC. toward the bottom of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
toward the top of the scene" + } + ], + "images": [ + "images/task_00107_0_651_camera_top.jpg", + "images/task_00107_0_656_camera_top.jpg", + "images/task_00107_0_661_camera_top.jpg", + "images/task_00107_0_666_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_00107_0_135_camera_top.jpg", + "images/task_00107_0_223_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00107_0_776_camera_top_t-6.jpg", + "images/task_00107_0_779_camera_top_t-3.jpg", + "images/task_00107_0_782_camera_top_t0.jpg", + "images/task_00107_0_785_camera_top_t+3.jpg", + "images/task_00107_0_788_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_00107_0_801_camera_top.jpg", + "images/task_00107_0_646_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. pre-approach\nC. approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
contact" + } + ], + "images": [ + "images/task_00107_0_153_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00107_0_577_camera_top.jpg", + "images/task_00107_0_580_camera_top.jpg", + "images/task_00107_0_583_camera_top.jpg", + "images/task_00107_0_586_camera_top.jpg", + "images/task_00107_0_589_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00107_0_636_camera_top_t-6.jpg", + "images/task_00107_0_639_camera_top_t-3.jpg", + "images/task_00107_0_642_camera_top_t0.jpg", + "images/task_00107_0_645_camera_top_t+3.jpg", + "images/task_00107_0_648_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. approach\nC. hold and carry\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. hold and carry" + } + ], + "images": [ + "images/task_00107_0_297_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00107_0_529_camera_top_t-6.jpg", + "images/task_00107_0_532_camera_top_t-3.jpg", + "images/task_00107_0_535_camera_top_t0.jpg", + "images/task_00107_0_538_camera_top_t+3.jpg", + "images/task_00107_0_541_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00107_65_155_camera_top.jpg", + "images/task_00107_65_158_camera_top.jpg", + "images/task_00107_65_161_camera_top.jpg", + "images/task_00107_65_164_camera_top.jpg", + "images/task_00107_65_167_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00107_65_730_camera_top.jpg", + "images/task_00107_65_733_camera_top.jpg", + "images/task_00107_65_736_camera_top.jpg", + "images/task_00107_65_739_camera_top.jpg", + "images/task_00107_65_742_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00107_65_175_camera_top_t-6.jpg", + "images/task_00107_65_178_camera_top_t-3.jpg", + "images/task_00107_65_181_camera_top_t0.jpg", + "images/task_00107_65_184_camera_top_t+3.jpg", + "images/task_00107_65_187_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. 
Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_00107_65_158_camera_top.jpg", + "images/task_00107_65_302_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_00107_65_952_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the right side of the scene\nA. toward the top of the scene\nC. toward the left side of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00107_65_1001_camera_top.jpg", + "images/task_00107_65_1006_camera_top.jpg", + "images/task_00107_65_1011_camera_top.jpg", + "images/task_00107_65_1016_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. contact\nC. pre-approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. hold and carry" + } + ], + "images": [ + "images/task_00107_65_456_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nC. toward the bottom of the scene\nA. toward the top of the scene\nB. toward the left side of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
toward the bottom of the scene" + } + ], + "images": [ + "images/task_00107_65_397_camera_top.jpg", + "images/task_00107_65_402_camera_top.jpg", + "images/task_00107_65_407_camera_top.jpg", + "images/task_00107_65_412_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00107_65_810_camera_top_t-6.jpg", + "images/task_00107_65_813_camera_top_t-3.jpg", + "images/task_00107_65_816_camera_top_t0.jpg", + "images/task_00107_65_819_camera_top_t+3.jpg", + "images/task_00107_65_822_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nC. toward the bottom of the scene\nA. toward the left side of the scene\nB. toward the top of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
toward the bottom of the scene" + } + ], + "images": [ + "images/task_00107_65_399_camera_top.jpg", + "images/task_00107_65_404_camera_top.jpg", + "images/task_00107_65_409_camera_top.jpg", + "images/task_00107_65_414_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. transfer\nC. release\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. transfer" + } + ], + "images": [ + "images/task_00107_65_543_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. transfer\nC. contact\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. transfer" + } + ], + "images": [ + "images/task_00107_65_599_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. transfer\nC. pre-approach\nD. 
release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. contact" + } + ], + "images": [ + "images/task_00107_65_337_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00107_65_174_camera_top.jpg", + "images/task_00107_65_177_camera_top.jpg", + "images/task_00107_65_180_camera_top.jpg", + "images/task_00107_65_183_camera_top.jpg", + "images/task_00107_65_186_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — contact established" + } + ], + "images": [ + "images/task_00107_65_886_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00107_65_406_camera_top_t-6.jpg", + "images/task_00107_65_409_camera_top_t-3.jpg", + "images/task_00107_65_412_camera_top_t0.jpg", + "images/task_00107_65_412_camera_top_t+3.jpg", + "images/task_00107_65_418_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nA. toward the bottom of the scene\nB. toward the top of the scene\nC. toward the left side of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
toward the bottom of the scene" + } + ], + "images": [ + "images/task_00107_65_395_camera_top.jpg", + "images/task_00107_65_400_camera_top.jpg", + "images/task_00107_65_405_camera_top.jpg", + "images/task_00107_65_410_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_00107_65_136_camera_top.jpg", + "images/task_00107_65_335_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nB. toward the bottom of the scene\nA. toward the top of the scene\nC. toward the left side of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
toward the bottom of the scene" + } + ], + "images": [ + "images/task_00107_65_396_camera_top.jpg", + "images/task_00107_65_401_camera_top.jpg", + "images/task_00107_65_406_camera_top.jpg", + "images/task_00107_65_411_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00107_65_401_camera_top_t-6.jpg", + "images/task_00107_65_404_camera_top_t-3.jpg", + "images/task_00107_65_407_camera_top_t0.jpg", + "images/task_00107_65_410_camera_top_t+3.jpg", + "images/task_00107_65_413_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00107_65_979_camera_top_t-6.jpg", + "images/task_00107_65_982_camera_top_t-3.jpg", + "images/task_00107_65_985_camera_top_t0.jpg", + "images/task_00107_65_988_camera_top_t+3.jpg", + "images/task_00107_65_991_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00107_65_167_camera_top.jpg", + "images/task_00107_65_170_camera_top.jpg", + "images/task_00107_65_173_camera_top.jpg", + "images/task_00107_65_176_camera_top.jpg", + "images/task_00107_65_179_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. pre-approach\nC. hold and carry\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
hold and carry" + } + ], + "images": [ + "images/task_00107_65_936_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_00107_65_315_camera_top.jpg", + "images/task_00107_65_134_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_00107_65_156_camera_top.jpg", + "images/task_00107_65_786_camera_top.jpg", + "images/task_00107_65_363_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. hold and carry\nC. approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. approach" + } + ], + "images": [ + "images/task_00107_65_131_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nA. toward the right side of the scene\nB. toward the top of the scene\nC. toward the bottom of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00107_65_991_camera_top.jpg", + "images/task_00107_65_996_camera_top.jpg", + "images/task_00107_65_1001_camera_top.jpg", + "images/task_00107_65_1006_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_00107_65_579_camera_top.jpg", + "images/task_00107_65_1030_camera_top.jpg", + "images/task_00107_65_140_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00107_65_589_camera_top.jpg", + "images/task_00107_65_592_camera_top.jpg", + "images/task_00107_65_595_camera_top.jpg", + "images/task_00107_65_598_camera_top.jpg", + "images/task_00107_65_601_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00107_65_502_camera_top.jpg", + "images/task_00107_65_505_camera_top.jpg", + "images/task_00107_65_508_camera_top.jpg", + "images/task_00107_65_511_camera_top.jpg", + "images/task_00107_65_514_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_00107_65_335_camera_top.jpg", + "images/task_00107_65_436_camera_top.jpg", + "images/task_00107_65_714_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_00107_65_380_camera_top.jpg", + "images/task_00107_65_934_camera_top.jpg", + "images/task_00107_65_1067_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_00107_65_754_camera_top.jpg", + "images/task_00107_65_192_camera_top.jpg", + "images/task_00107_65_1016_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. 
The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_00107_65_334_camera_top.jpg", + "images/task_00107_65_146_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_00107_65_271_camera_top.jpg", + "images/task_00107_65_465_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. pre-approach\nC. contact\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
approach" + } + ], + "images": [ + "images/task_00107_65_325_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00107_129_249_camera_top.jpg", + "images/task_00107_129_252_camera_top.jpg", + "images/task_00107_129_255_camera_top.jpg", + "images/task_00107_129_258_camera_top.jpg", + "images/task_00107_129_261_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_00107_129_1364_camera_top.jpg", + "images/task_00107_129_433_camera_top.jpg", + "images/task_00107_129_981_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_00107_129_65_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the left side of the scene\nB. toward the bottom of the scene\nC. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00107_129_1284_camera_top.jpg", + "images/task_00107_129_1289_camera_top.jpg", + "images/task_00107_129_1294_camera_top.jpg", + "images/task_00107_129_1299_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nC. toward the bottom of the scene\nA. toward the right side of the scene\nB. toward the left side of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. toward the bottom of the scene" + } + ], + "images": [ + "images/task_00107_129_283_camera_top.jpg", + "images/task_00107_129_288_camera_top.jpg", + "images/task_00107_129_293_camera_top.jpg", + "images/task_00107_129_298_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. pre-approach\nC. contact\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
contact" + } + ], + "images": [ + "images/task_00107_129_228_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_00107_129_294_camera_top.jpg", + "images/task_00107_129_383_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00107_129_665_camera_top.jpg", + "images/task_00107_129_668_camera_top.jpg", + "images/task_00107_129_671_camera_top.jpg", + "images/task_00107_129_674_camera_top.jpg", + "images/task_00107_129_677_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_00107_129_136_camera_top.jpg", + "images/task_00107_129_414_camera_top.jpg", + "images/task_00107_129_1279_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. transfer\nC. hold and carry\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
contact" + } + ], + "images": [ + "images/task_00107_129_434_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the left side of the scene\nB. toward the bottom of the scene\nC. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. toward the right side of the scene" + } + ], + "images": [ + "images/task_00107_129_501_camera_top.jpg", + "images/task_00107_129_506_camera_top.jpg", + "images/task_00107_129_511_camera_top.jpg", + "images/task_00107_129_516_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_00107_129_128_camera_top.jpg", + "images/task_00107_129_1340_camera_top.jpg", + "images/task_00107_129_1244_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. transfer\nC. contact\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. contact" + } + ], + "images": [ + "images/task_00107_129_430_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nC. toward the right side of the scene\nA. toward the top of the scene\nB. toward the bottom of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00107_129_524_camera_top.jpg", + "images/task_00107_129_529_camera_top.jpg", + "images/task_00107_129_534_camera_top.jpg", + "images/task_00107_129_539_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. approach\nC. transfer\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. approach" + } + ], + "images": [ + "images/task_00107_129_191_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_00107_129_1191_camera_top.jpg", + "images/task_00107_129_155_camera_top.jpg", + "images/task_00107_129_1304_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nD. 
toward the right side of the scene\nA. toward the bottom of the scene\nB. toward the left side of the scene\nC. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. toward the right side of the scene" + } + ], + "images": [ + "images/task_00107_129_504_camera_top.jpg", + "images/task_00107_129_509_camera_top.jpg", + "images/task_00107_129_514_camera_top.jpg", + "images/task_00107_129_519_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_00107_129_339_camera_top.jpg", + "images/task_00107_129_132_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_00107_129_171_camera_top.jpg", + "images/task_00107_129_430_camera_top.jpg", + "images/task_00107_129_891_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. approach\nC. release\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. hold and carry" + } + ], + "images": [ + "images/task_00107_129_496_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nD. toward the bottom of the scene\nA. toward the top of the scene\nB. toward the left side of the scene\nC. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
toward the bottom of the scene" + } + ], + "images": [ + "images/task_00107_129_646_camera_top.jpg", + "images/task_00107_129_651_camera_top.jpg", + "images/task_00107_129_656_camera_top.jpg", + "images/task_00107_129_661_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_00107_129_207_camera_top.jpg", + "images/task_00107_129_137_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00107_129_763_camera_top_t-6.jpg", + "images/task_00107_129_766_camera_top_t-3.jpg", + "images/task_00107_129_769_camera_top_t0.jpg", + "images/task_00107_129_772_camera_top_t+3.jpg", + "images/task_00107_129_775_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_00107_129_1334_camera_top.jpg", + "images/task_00107_129_1217_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00107_129_838_camera_top.jpg", + "images/task_00107_129_841_camera_top.jpg", + "images/task_00107_129_844_camera_top.jpg", + "images/task_00107_129_847_camera_top.jpg", + "images/task_00107_129_850_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. contact\nC. hold and carry\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. pre-approach" + } + ], + "images": [ + "images/task_00107_129_114_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nC. toward the right side of the scene\nA. toward the left side of the scene\nB. toward the top of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00107_129_1285_camera_top.jpg", + "images/task_00107_129_1290_camera_top.jpg", + "images/task_00107_129_1295_camera_top.jpg", + "images/task_00107_129_1300_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_00107_129_134_camera_top.jpg", + "images/task_00107_129_282_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — contact established" + } + ], + "images": [ + "images/task_00107_129_696_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/task_00107_129_264_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nA. toward the top of the scene\nB. toward the left side of the scene\nC. toward the bottom of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
toward the top of the scene" + } + ], + "images": [ + "images/task_00107_129_589_camera_top.jpg", + "images/task_00107_129_594_camera_top.jpg", + "images/task_00107_129_599_camera_top.jpg", + "images/task_00107_129_604_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nA. toward the top of the scene\nB. toward the bottom of the scene\nC. toward the right side of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. toward the top of the scene" + } + ], + "images": [ + "images/task_00107_129_1094_camera_top.jpg", + "images/task_00107_129_1099_camera_top.jpg", + "images/task_00107_129_1104_camera_top.jpg", + "images/task_00107_129_1109_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. contact\nC. pre-approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
release" + } + ], + "images": [ + "images/task_00107_129_1445_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/task_00107_129_1331_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nC. toward the bottom of the scene\nA. toward the top of the scene\nB. toward the right side of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
toward the bottom of the scene" + } + ], + "images": [ + "images/task_00107_129_638_camera_top.jpg", + "images/task_00107_129_643_camera_top.jpg", + "images/task_00107_129_648_camera_top.jpg", + "images/task_00107_129_653_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nB. toward the bottom of the scene\nA. toward the top of the scene\nC. toward the right side of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. toward the bottom of the scene" + } + ], + "images": [ + "images/task_00107_129_649_camera_top.jpg", + "images/task_00107_129_654_camera_top.jpg", + "images/task_00107_129_659_camera_top.jpg", + "images/task_00107_129_664_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00107_129_1074_camera_top_t-6.jpg", + "images/task_00107_129_1077_camera_top_t-3.jpg", + "images/task_00107_129_1080_camera_top_t0.jpg", + "images/task_00107_129_1083_camera_top_t+3.jpg", + "images/task_00107_129_1086_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00107_129_297_camera_top.jpg", + "images/task_00107_129_300_camera_top.jpg", + "images/task_00107_129_303_camera_top.jpg", + "images/task_00107_129_306_camera_top.jpg", + "images/task_00107_129_309_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nC. toward the left side of the scene\nA. toward the bottom of the scene\nB. toward the top of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "C. toward the left side of the scene" + } + ], + "images": [ + "images/task_00107_129_382_camera_top.jpg", + "images/task_00107_129_387_camera_top.jpg", + "images/task_00107_129_392_camera_top.jpg", + "images/task_00107_129_397_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00107_129_1071_camera_top.jpg", + "images/task_00107_129_1074_camera_top.jpg", + "images/task_00107_129_1077_camera_top.jpg", + "images/task_00107_129_1080_camera_top.jpg", + "images/task_00107_129_1083_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00107_129_316_camera_top.jpg", + "images/task_00107_129_319_camera_top.jpg", + "images/task_00107_129_322_camera_top.jpg", + "images/task_00107_129_325_camera_top.jpg", + "images/task_00107_129_328_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/task_00107_129_324_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nA. toward the bottom of the scene\nB. toward the left side of the scene\nC. toward the right side of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
toward the bottom of the scene" + } + ], + "images": [ + "images/task_00107_129_647_camera_top.jpg", + "images/task_00107_129_652_camera_top.jpg", + "images/task_00107_129_657_camera_top.jpg", + "images/task_00107_129_662_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00107_129_1281_camera_top_t-6.jpg", + "images/task_00107_129_1284_camera_top_t-3.jpg", + "images/task_00107_129_1287_camera_top_t0.jpg", + "images/task_00107_129_1290_camera_top_t+3.jpg", + "images/task_00107_129_1293_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00107", + "split": "eval", + "split_group_id": "task_00107__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. release\nC. transfer\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
contact" + } + ], + "images": [ + "images/task_00108_0_103_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00108_0_258_camera_top.jpg", + "images/task_00108_0_261_camera_top.jpg", + "images/task_00108_0_264_camera_top.jpg", + "images/task_00108_0_267_camera_top.jpg", + "images/task_00108_0_270_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — contact established" + } + ], + "images": [ + "images/task_00108_0_272_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00108_0_74_camera_top.jpg", + "images/task_00108_0_77_camera_top.jpg", + "images/task_00108_0_80_camera_top.jpg", + "images/task_00108_0_83_camera_top.jpg", + "images/task_00108_0_86_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00108_0_14_camera_top.jpg", + "images/task_00108_0_17_camera_top.jpg", + "images/task_00108_0_20_camera_top.jpg", + "images/task_00108_0_23_camera_top.jpg", + "images/task_00108_0_26_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nB. toward the top of the scene\nA. toward the right side of the scene\nC. toward the bottom of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. toward the top of the scene" + } + ], + "images": [ + "images/task_00108_0_812_camera_top.jpg", + "images/task_00108_0_817_camera_top.jpg", + "images/task_00108_0_822_camera_top.jpg", + "images/task_00108_0_827_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00108_0_287_camera_top.jpg", + "images/task_00108_0_290_camera_top.jpg", + "images/task_00108_0_293_camera_top.jpg", + "images/task_00108_0_296_camera_top.jpg", + "images/task_00108_0_299_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. hold and carry\nC. transfer\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. approach" + } + ], + "images": [ + "images/task_00108_0_188_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00108_0_488_camera_top.jpg", + "images/task_00108_0_491_camera_top.jpg", + "images/task_00108_0_494_camera_top.jpg", + "images/task_00108_0_497_camera_top.jpg", + "images/task_00108_0_500_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00108_0_458_camera_top_t-6.jpg", + "images/task_00108_0_461_camera_top_t-3.jpg", + "images/task_00108_0_464_camera_top_t0.jpg", + "images/task_00108_0_467_camera_top_t+3.jpg", + "images/task_00108_0_470_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00108_0_647_camera_top_t-6.jpg", + "images/task_00108_0_650_camera_top_t-3.jpg", + "images/task_00108_0_653_camera_top_t0.jpg", + "images/task_00108_0_656_camera_top_t+3.jpg", + "images/task_00108_0_659_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. approach\nC. release\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. transfer" + } + ], + "images": [ + "images/task_00108_0_780_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00108_0_93_camera_top_t-6.jpg", + "images/task_00108_0_96_camera_top_t-3.jpg", + "images/task_00108_0_99_camera_top_t0.jpg", + "images/task_00108_0_102_camera_top_t+3.jpg", + "images/task_00108_0_105_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00108_0_634_camera_top.jpg", + "images/task_00108_0_637_camera_top.jpg", + "images/task_00108_0_640_camera_top.jpg", + "images/task_00108_0_643_camera_top.jpg", + "images/task_00108_0_646_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. 
Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_00108_0_282_camera_top.jpg", + "images/task_00108_0_428_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. contact\nC. transfer\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. transfer" + } + ], + "images": [ + "images/task_00108_0_311_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/task_00108_0_368_camera_top.jpg", + "images/task_00108_0_486_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nC. toward the left side of the scene\nA. toward the top of the scene\nB. toward the right side of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. toward the left side of the scene" + } + ], + "images": [ + "images/task_00108_0_690_camera_top.jpg", + "images/task_00108_0_695_camera_top.jpg", + "images/task_00108_0_700_camera_top.jpg", + "images/task_00108_0_705_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the left side of the scene\nB. toward the bottom of the scene\nC. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00108_0_614_camera_top.jpg", + "images/task_00108_0_619_camera_top.jpg", + "images/task_00108_0_624_camera_top.jpg", + "images/task_00108_0_629_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nA. toward the top of the scene\nB. toward the right side of the scene\nC. toward the left side of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. toward the top of the scene" + } + ], + "images": [ + "images/task_00108_0_220_camera_top.jpg", + "images/task_00108_0_225_camera_top.jpg", + "images/task_00108_0_230_camera_top.jpg", + "images/task_00108_0_235_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — no contact" + } + ], + "images": [ + "images/task_00108_0_148_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00108_0_335_camera_top_t-6.jpg", + "images/task_00108_0_338_camera_top_t-3.jpg", + "images/task_00108_0_341_camera_top_t0.jpg", + "images/task_00108_0_344_camera_top_t+3.jpg", + "images/task_00108_0_347_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_00108_0_398_camera_top.jpg", + "images/task_00108_0_839_camera_top.jpg", + "images/task_00108_0_184_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00108_0_53_camera_top.jpg", + "images/task_00108_0_56_camera_top.jpg", + "images/task_00108_0_59_camera_top.jpg", + "images/task_00108_0_62_camera_top.jpg", + "images/task_00108_0_65_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. hold and carry\nC. transfer\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
transfer" + } + ], + "images": [ + "images/task_00108_0_345_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_00108_0_120_camera_top.jpg", + "images/task_00108_0_285_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. transfer\nC. release\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
release" + } + ], + "images": [ + "images/task_00108_0_851_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00108_0_673_camera_top.jpg", + "images/task_00108_0_676_camera_top.jpg", + "images/task_00108_0_679_camera_top.jpg", + "images/task_00108_0_682_camera_top.jpg", + "images/task_00108_0_685_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/task_00108_0_140_camera_top.jpg", + "images/task_00108_0_288_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. transfer\nC. contact\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. release" + } + ], + "images": [ + "images/task_00108_0_838_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00108_0_330_camera_top.jpg", + "images/task_00108_0_333_camera_top.jpg", + "images/task_00108_0_336_camera_top.jpg", + "images/task_00108_0_339_camera_top.jpg", + "images/task_00108_0_342_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. contact\nC. pre-approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. hold and carry" + } + ], + "images": [ + "images/task_00108_65_245_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/task_00108_65_196_camera_top.jpg", + "images/task_00108_65_300_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/task_00108_65_262_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. contact\nC. approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. hold and carry" + } + ], + "images": [ + "images/task_00108_65_217_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nD. toward the top of the scene\nA. toward the bottom of the scene\nB. toward the left side of the scene\nC. 
toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. toward the top of the scene" + } + ], + "images": [ + "images/task_00108_65_281_camera_top.jpg", + "images/task_00108_65_286_camera_top.jpg", + "images/task_00108_65_291_camera_top.jpg", + "images/task_00108_65_296_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. hold and carry\nC. transfer\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. contact" + } + ], + "images": [ + "images/task_00108_65_285_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. pre-approach\nC. approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
contact" + } + ], + "images": [ + "images/task_00108_65_323_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. pre-approach\nC. transfer\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. approach" + } + ], + "images": [ + "images/task_00108_65_56_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the left gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/task_00108_65_184_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. release\nC. pre-approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
pre-approach" + } + ], + "images": [ + "images/task_00108_65_11_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the right side of the scene\nA. toward the left side of the scene\nC. toward the top of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. toward the right side of the scene" + } + ], + "images": [ + "images/task_00108_65_150_camera_top.jpg", + "images/task_00108_65_155_camera_top.jpg", + "images/task_00108_65_160_camera_top.jpg", + "images/task_00108_65_165_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00108_65_279_camera_top.jpg", + "images/task_00108_65_282_camera_top.jpg", + "images/task_00108_65_285_camera_top.jpg", + "images/task_00108_65_288_camera_top.jpg", + "images/task_00108_65_291_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the left gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_00108_65_292_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the bottom of the scene\nB. toward the top of the scene\nC. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00108_65_151_camera_top.jpg", + "images/task_00108_65_156_camera_top.jpg", + "images/task_00108_65_161_camera_top.jpg", + "images/task_00108_65_166_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. contact\nC. release\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. pre-approach" + } + ], + "images": [ + "images/task_00108_65_2_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the left gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/task_00108_65_207_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00108_65_234_camera_top.jpg", + "images/task_00108_65_237_camera_top.jpg", + "images/task_00108_65_240_camera_top.jpg", + "images/task_00108_65_243_camera_top.jpg", + "images/task_00108_65_246_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nC. toward the bottom of the scene\nA. toward the top of the scene\nB. toward the right side of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. toward the bottom of the scene" + } + ], + "images": [ + "images/task_00108_65_211_camera_top.jpg", + "images/task_00108_65_216_camera_top.jpg", + "images/task_00108_65_221_camera_top.jpg", + "images/task_00108_65_226_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nB. toward the bottom of the scene\nA. toward the left side of the scene\nC. 
toward the top of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. toward the bottom of the scene" + } + ], + "images": [ + "images/task_00108_65_209_camera_top.jpg", + "images/task_00108_65_214_camera_top.jpg", + "images/task_00108_65_219_camera_top.jpg", + "images/task_00108_65_224_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00108_65_255_camera_top_t-6.jpg", + "images/task_00108_65_258_camera_top_t-3.jpg", + "images/task_00108_65_261_camera_top_t0.jpg", + "images/task_00108_65_264_camera_top_t+3.jpg", + "images/task_00108_65_267_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. pre-approach\nC. hold and carry\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
contact" + } + ], + "images": [ + "images/task_00108_65_151_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. approach\nC. release\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. release" + } + ], + "images": [ + "images/task_00108_65_396_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00108_65_125_camera_top.jpg", + "images/task_00108_65_128_camera_top.jpg", + "images/task_00108_65_131_camera_top.jpg", + "images/task_00108_65_134_camera_top.jpg", + "images/task_00108_65_137_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/task_00108_65_309_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00108_65_180_camera_top_t-6.jpg", + "images/task_00108_65_183_camera_top_t-3.jpg", + "images/task_00108_65_186_camera_top_t0.jpg", + "images/task_00108_65_189_camera_top_t+3.jpg", + "images/task_00108_65_192_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. pre-approach\nC. approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. release" + } + ], + "images": [ + "images/task_00108_65_375_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00108_65_238_camera_top_t-6.jpg", + "images/task_00108_65_241_camera_top_t-3.jpg", + "images/task_00108_65_244_camera_top_t0.jpg", + "images/task_00108_65_247_camera_top_t+3.jpg", + "images/task_00108_65_250_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_00108_65_326_camera_top.jpg", + "images/task_00108_65_182_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nA. toward the bottom of the scene\nB. toward the right side of the scene\nC. toward the left side of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
toward the bottom of the scene" + } + ], + "images": [ + "images/task_00108_65_212_camera_top.jpg", + "images/task_00108_65_217_camera_top.jpg", + "images/task_00108_65_222_camera_top.jpg", + "images/task_00108_65_227_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. approach\nC. release\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. hold and carry" + } + ], + "images": [ + "images/task_00108_65_114_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/task_00108_65_371_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. approach\nC. hold and carry\nD. 
transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. hold and carry" + } + ], + "images": [ + "images/task_00108_129_49_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nA. toward the top of the scene\nB. toward the left side of the scene\nC. toward the bottom of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. toward the top of the scene" + } + ], + "images": [ + "images/task_00108_129_245_camera_top.jpg", + "images/task_00108_129_250_camera_top.jpg", + "images/task_00108_129_255_camera_top.jpg", + "images/task_00108_129_260_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the left gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — no contact" + } + ], + "images": [ + "images/task_00108_129_273_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. release\nC. approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. release" + } + ], + "images": [ + "images/task_00108_129_345_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00108_129_150_camera_top.jpg", + "images/task_00108_129_153_camera_top.jpg", + "images/task_00108_129_156_camera_top.jpg", + "images/task_00108_129_159_camera_top.jpg", + "images/task_00108_129_162_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. approach\nC. transfer\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. transfer" + } + ], + "images": [ + "images/task_00108_129_65_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. transfer\nC. hold and carry\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
transfer" + } + ], + "images": [ + "images/task_00108_129_170_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00108_129_252_camera_top_t-6.jpg", + "images/task_00108_129_255_camera_top_t-3.jpg", + "images/task_00108_129_258_camera_top_t0.jpg", + "images/task_00108_129_261_camera_top_t+3.jpg", + "images/task_00108_129_264_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. hold and carry\nC. transfer\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
contact" + } + ], + "images": [ + "images/task_00108_129_2_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nD. toward the top of the scene\nA. toward the left side of the scene\nB. toward the right side of the scene\nC. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. toward the top of the scene" + } + ], + "images": [ + "images/task_00108_129_248_camera_top.jpg", + "images/task_00108_129_253_camera_top.jpg", + "images/task_00108_129_258_camera_top.jpg", + "images/task_00108_129_263_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00108_129_220_camera_top.jpg", + "images/task_00108_129_223_camera_top.jpg", + "images/task_00108_129_226_camera_top.jpg", + "images/task_00108_129_229_camera_top.jpg", + "images/task_00108_129_232_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00108_129_47_camera_top.jpg", + "images/task_00108_129_50_camera_top.jpg", + "images/task_00108_129_53_camera_top.jpg", + "images/task_00108_129_56_camera_top.jpg", + "images/task_00108_129_59_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00108_129_90_camera_top_t-6.jpg", + "images/task_00108_129_93_camera_top_t-3.jpg", + "images/task_00108_129_96_camera_top_t0.jpg", + "images/task_00108_129_99_camera_top_t+3.jpg", + "images/task_00108_129_102_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/task_00108_129_282_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the top of the scene\nB. toward the bottom of the scene\nC. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00108_129_151_camera_top.jpg", + "images/task_00108_129_156_camera_top.jpg", + "images/task_00108_129_161_camera_top.jpg", + "images/task_00108_129_166_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00108_129_297_camera_top_t-6.jpg", + "images/task_00108_129_300_camera_top_t-3.jpg", + "images/task_00108_129_303_camera_top_t0.jpg", + "images/task_00108_129_306_camera_top_t+3.jpg", + "images/task_00108_129_309_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. release\nC. contact\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
release" + } + ], + "images": [ + "images/task_00108_129_406_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nB. toward the top of the scene\nA. toward the left side of the scene\nC. toward the right side of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. toward the top of the scene" + } + ], + "images": [ + "images/task_00108_129_244_camera_top.jpg", + "images/task_00108_129_249_camera_top.jpg", + "images/task_00108_129_254_camera_top.jpg", + "images/task_00108_129_259_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nA. toward the top of the scene\nB. toward the left side of the scene\nC. toward the right side of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
toward the top of the scene" + } + ], + "images": [ + "images/task_00108_129_249_camera_top.jpg", + "images/task_00108_129_254_camera_top.jpg", + "images/task_00108_129_259_camera_top.jpg", + "images/task_00108_129_264_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nC. toward the bottom of the scene\nA. toward the top of the scene\nB. toward the left side of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. toward the bottom of the scene" + } + ], + "images": [ + "images/task_00108_129_98_camera_top.jpg", + "images/task_00108_129_103_camera_top.jpg", + "images/task_00108_129_108_camera_top.jpg", + "images/task_00108_129_113_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/task_00108_129_307_camera_top.jpg", + "images/task_00108_129_175_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. transfer\nC. hold and carry\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. hold and carry" + } + ], + "images": [ + "images/task_00108_129_103_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. hold and carry\nC. release\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. approach" + } + ], + "images": [ + "images/task_00108_129_303_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. hold and carry\nC. release\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
release" + } + ], + "images": [ + "images/task_00108_129_355_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. hold and carry\nC. contact\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. approach" + } + ], + "images": [ + "images/task_00108_129_303_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00108", + "split": "eval", + "split_group_id": "task_00108__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_00109_0_386_camera_top.jpg", + "images/task_00109_0_131_camera_top.jpg", + "images/task_00109_0_324_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. transfer\nC. release\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
approach" + } + ], + "images": [ + "images/task_00109_0_135_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_00109_0_381_camera_top.jpg", + "images/task_00109_0_162_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00109_0_254_camera_top.jpg", + "images/task_00109_0_257_camera_top.jpg", + "images/task_00109_0_260_camera_top.jpg", + "images/task_00109_0_263_camera_top.jpg", + "images/task_00109_0_266_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. hold and carry\nC. pre-approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. transfer" + } + ], + "images": [ + "images/task_00109_0_380_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the bottom of the scene\nB. toward the top of the scene\nC. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00109_0_357_camera_top.jpg", + "images/task_00109_0_362_camera_top.jpg", + "images/task_00109_0_367_camera_top.jpg", + "images/task_00109_0_372_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00109_0_193_camera_top_t-6.jpg", + "images/task_00109_0_196_camera_top_t-3.jpg", + "images/task_00109_0_199_camera_top_t0.jpg", + "images/task_00109_0_202_camera_top_t+3.jpg", + "images/task_00109_0_205_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the left side of the scene\nA. toward the right side of the scene\nB. toward the top of the scene\nC. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
toward the left side of the scene" + } + ], + "images": [ + "images/task_00109_0_262_camera_top.jpg", + "images/task_00109_0_267_camera_top.jpg", + "images/task_00109_0_272_camera_top.jpg", + "images/task_00109_0_277_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_00109_0_161_camera_top.jpg", + "images/task_00109_0_200_camera_top.jpg", + "images/task_00109_0_411_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. contact\nC. release\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. approach" + } + ], + "images": [ + "images/task_00109_0_128_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. 
Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00109_0_389_camera_top_t-6.jpg", + "images/task_00109_0_392_camera_top_t-3.jpg", + "images/task_00109_0_395_camera_top_t0.jpg", + "images/task_00109_0_398_camera_top_t+3.jpg", + "images/task_00109_0_401_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the left side of the scene\nA. toward the bottom of the scene\nC. toward the right side of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. toward the left side of the scene" + } + ], + "images": [ + "images/task_00109_0_260_camera_top.jpg", + "images/task_00109_0_265_camera_top.jpg", + "images/task_00109_0_270_camera_top.jpg", + "images/task_00109_0_275_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_00109_0_361_camera_top.jpg", + "images/task_00109_0_129_camera_top.jpg", + "images/task_00109_0_161_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. transfer\nC. pre-approach\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. transfer" + } + ], + "images": [ + "images/task_00109_0_381_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_00109_0_420_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. 
The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_00109_0_197_camera_top.jpg", + "images/task_00109_0_379_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_00109_0_134_camera_top.jpg", + "images/task_00109_0_415_camera_top.jpg", + "images/task_00109_0_380_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/task_00109_0_153_camera_top.jpg", + "images/task_00109_0_385_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00109_0_218_camera_top.jpg", + "images/task_00109_0_221_camera_top.jpg", + "images/task_00109_0_224_camera_top.jpg", + "images/task_00109_0_227_camera_top.jpg", + "images/task_00109_0_230_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. approach\nC. release\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
hold and carry" + } + ], + "images": [ + "images/task_00109_0_235_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. release\nC. contact\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. release" + } + ], + "images": [ + "images/task_00109_0_399_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the right side of the scene\nA. toward the bottom of the scene\nC. toward the top of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00109_0_388_camera_top.jpg", + "images/task_00109_0_393_camera_top.jpg", + "images/task_00109_0_398_camera_top.jpg", + "images/task_00109_0_403_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00109_0_281_camera_top_t-6.jpg", + "images/task_00109_0_284_camera_top_t-3.jpg", + "images/task_00109_0_287_camera_top_t0.jpg", + "images/task_00109_0_290_camera_top_t+3.jpg", + "images/task_00109_0_293_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nC. toward the right side of the scene\nA. toward the left side of the scene\nB. toward the top of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00109_0_355_camera_top.jpg", + "images/task_00109_0_360_camera_top.jpg", + "images/task_00109_0_365_camera_top.jpg", + "images/task_00109_0_370_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/task_00109_0_207_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nC. toward the left side of the scene\nA. toward the bottom of the scene\nB. toward the right side of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
toward the left side of the scene" + } + ], + "images": [ + "images/task_00109_0_257_camera_top.jpg", + "images/task_00109_0_262_camera_top.jpg", + "images/task_00109_0_267_camera_top.jpg", + "images/task_00109_0_272_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. transfer\nC. approach\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. approach" + } + ], + "images": [ + "images/task_00109_0_133_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nC. toward the right side of the scene\nA. toward the left side of the scene\nB. toward the bottom of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00109_0_352_camera_top.jpg", + "images/task_00109_0_357_camera_top.jpg", + "images/task_00109_0_362_camera_top.jpg", + "images/task_00109_0_367_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00109_0_398_camera_top_t-6.jpg", + "images/task_00109_0_401_camera_top_t-3.jpg", + "images/task_00109_0_404_camera_top_t0.jpg", + "images/task_00109_0_407_camera_top_t+3.jpg", + "images/task_00109_0_410_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. transfer\nC. pre-approach\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
pre-approach" + } + ], + "images": [ + "images/task_00109_0_93_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_00109_0_403_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. transfer\nC. release\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. transfer" + } + ], + "images": [ + "images/task_00109_0_379_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00109_0_339_camera_top.jpg", + "images/task_00109_0_342_camera_top.jpg", + "images/task_00109_0_345_camera_top.jpg", + "images/task_00109_0_348_camera_top.jpg", + "images/task_00109_0_351_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. approach\nC. release\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. contact" + } + ], + "images": [ + "images/task_00109_0_158_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. contact\nC. hold and carry\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
hold and carry" + } + ], + "images": [ + "images/task_00109_0_290_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the left side of the scene\nA. toward the top of the scene\nC. toward the bottom of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. toward the left side of the scene" + } + ], + "images": [ + "images/task_00109_0_126_camera_top.jpg", + "images/task_00109_0_131_camera_top.jpg", + "images/task_00109_0_136_camera_top.jpg", + "images/task_00109_0_141_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. approach\nC. transfer\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
transfer" + } + ], + "images": [ + "images/task_00109_0_386_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nA. toward the left side of the scene\nB. toward the right side of the scene\nC. toward the bottom of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. toward the left side of the scene" + } + ], + "images": [ + "images/task_00109_0_261_camera_top.jpg", + "images/task_00109_0_266_camera_top.jpg", + "images/task_00109_0_271_camera_top.jpg", + "images/task_00109_0_276_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nA. toward the right side of the scene\nB. toward the bottom of the scene\nC. toward the left side of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00109_0_389_camera_top.jpg", + "images/task_00109_0_394_camera_top.jpg", + "images/task_00109_0_399_camera_top.jpg", + "images/task_00109_0_404_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_00109_0_195_camera_top.jpg", + "images/task_00109_0_387_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00109_0_428_camera_top_t-6.jpg", + "images/task_00109_0_431_camera_top_t-3.jpg", + "images/task_00109_0_434_camera_top_t0.jpg", + "images/task_00109_0_437_camera_top_t+3.jpg", + "images/task_00109_0_440_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the right side of the scene\nA. toward the top of the scene\nC. toward the bottom of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. toward the right side of the scene" + } + ], + "images": [ + "images/task_00109_65_193_camera_top.jpg", + "images/task_00109_65_198_camera_top.jpg", + "images/task_00109_65_203_camera_top.jpg", + "images/task_00109_65_208_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/task_00109_65_104_camera_top.jpg", + "images/task_00109_65_266_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00109_65_442_camera_top_t-6.jpg", + "images/task_00109_65_445_camera_top_t-3.jpg", + "images/task_00109_65_448_camera_top_t0.jpg", + "images/task_00109_65_451_camera_top_t+3.jpg", + "images/task_00109_65_454_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/task_00109_65_92_camera_top.jpg", + "images/task_00109_65_165_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_00109_65_227_camera_top.jpg", + "images/task_00109_65_300_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_00109_65_211_camera_top.jpg", + "images/task_00109_65_270_camera_top.jpg", + "images/task_00109_65_114_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_00109_65_108_camera_top.jpg", + "images/task_00109_65_168_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00109_65_158_camera_top.jpg", + "images/task_00109_65_161_camera_top.jpg", + "images/task_00109_65_164_camera_top.jpg", + "images/task_00109_65_167_camera_top.jpg", + "images/task_00109_65_170_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the left side of the scene\nA. toward the right side of the scene\nC. toward the bottom of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. toward the left side of the scene" + } + ], + "images": [ + "images/task_00109_65_228_camera_top.jpg", + "images/task_00109_65_233_camera_top.jpg", + "images/task_00109_65_238_camera_top.jpg", + "images/task_00109_65_243_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00109_65_432_camera_top_t-6.jpg", + "images/task_00109_65_435_camera_top_t-3.jpg", + "images/task_00109_65_438_camera_top_t0.jpg", + "images/task_00109_65_441_camera_top_t+3.jpg", + "images/task_00109_65_444_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. transfer\nC. pre-approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. transfer" + } + ], + "images": [ + "images/task_00109_65_302_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the top of the scene\nB. toward the left side of the scene\nC. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00109_65_204_camera_top.jpg", + "images/task_00109_65_209_camera_top.jpg", + "images/task_00109_65_214_camera_top.jpg", + "images/task_00109_65_219_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. release\nC. approach\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. pre-approach" + } + ], + "images": [ + "images/task_00109_65_87_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. contact\nC. pre-approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. pre-approach" + } + ], + "images": [ + "images/task_00109_65_82_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the left side of the scene\nA. 
toward the bottom of the scene\nB. toward the top of the scene\nC. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. toward the left side of the scene" + } + ], + "images": [ + "images/task_00109_65_230_camera_top.jpg", + "images/task_00109_65_235_camera_top.jpg", + "images/task_00109_65_240_camera_top.jpg", + "images/task_00109_65_245_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00109_65_441_camera_top_t-6.jpg", + "images/task_00109_65_444_camera_top_t-3.jpg", + "images/task_00109_65_447_camera_top_t0.jpg", + "images/task_00109_65_450_camera_top_t+3.jpg", + "images/task_00109_65_453_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — contact established" + } + ], + "images": [ + "images/task_00109_65_173_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_00109_65_124_camera_top.jpg", + "images/task_00109_65_188_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. transfer\nC. contact\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
release" + } + ], + "images": [ + "images/task_00109_65_314_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00109_65_240_camera_top.jpg", + "images/task_00109_65_243_camera_top.jpg", + "images/task_00109_65_246_camera_top.jpg", + "images/task_00109_65_249_camera_top.jpg", + "images/task_00109_65_252_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. hold and carry\nC. contact\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
contact" + } + ], + "images": [ + "images/task_00109_65_124_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. pre-approach\nC. hold and carry\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. hold and carry" + } + ], + "images": [ + "images/task_00109_65_130_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_00109_65_253_camera_top.jpg", + "images/task_00109_65_188_camera_top.jpg", + "images/task_00109_65_108_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nC. toward the right side of the scene\nA. toward the left side of the scene\nB. toward the bottom of the scene\nD. toward the top of the scene\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "C. toward the right side of the scene" + } + ], + "images": [ + "images/task_00109_65_187_camera_top.jpg", + "images/task_00109_65_192_camera_top.jpg", + "images/task_00109_65_197_camera_top.jpg", + "images/task_00109_65_202_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nA. toward the right side of the scene\nB. toward the bottom of the scene\nC. toward the left side of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. toward the right side of the scene" + } + ], + "images": [ + "images/task_00109_65_194_camera_top.jpg", + "images/task_00109_65_199_camera_top.jpg", + "images/task_00109_65_204_camera_top.jpg", + "images/task_00109_65_209_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. pre-approach\nC. release\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
contact" + } + ], + "images": [ + "images/task_00109_65_122_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the bottom of the scene\nB. toward the left side of the scene\nC. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. toward the right side of the scene" + } + ], + "images": [ + "images/task_00109_65_185_camera_top.jpg", + "images/task_00109_65_190_camera_top.jpg", + "images/task_00109_65_195_camera_top.jpg", + "images/task_00109_65_200_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. pre-approach\nC. hold and carry\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
pre-approach" + } + ], + "images": [ + "images/task_00109_65_74_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/task_00109_65_233_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00109_65_511_camera_top_t-6.jpg", + "images/task_00109_65_514_camera_top_t-3.jpg", + "images/task_00109_65_517_camera_top_t0.jpg", + "images/task_00109_65_520_camera_top_t+3.jpg", + "images/task_00109_65_523_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00109_65_224_camera_top.jpg", + "images/task_00109_65_227_camera_top.jpg", + "images/task_00109_65_230_camera_top.jpg", + "images/task_00109_65_233_camera_top.jpg", + "images/task_00109_65_236_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. hold and carry\nC. pre-approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
transfer" + } + ], + "images": [ + "images/task_00109_65_269_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nA. toward the left side of the scene\nB. toward the top of the scene\nC. toward the right side of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. toward the left side of the scene" + } + ], + "images": [ + "images/task_00109_65_225_camera_top.jpg", + "images/task_00109_65_230_camera_top.jpg", + "images/task_00109_65_235_camera_top.jpg", + "images/task_00109_65_240_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00109_65_158_camera_top_t-6.jpg", + "images/task_00109_65_161_camera_top_t-3.jpg", + "images/task_00109_65_164_camera_top_t0.jpg", + "images/task_00109_65_167_camera_top_t+3.jpg", + "images/task_00109_65_170_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the left side of the scene\nA. toward the top of the scene\nC. toward the right side of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. toward the left side of the scene" + } + ], + "images": [ + "images/task_00109_65_229_camera_top.jpg", + "images/task_00109_65_234_camera_top.jpg", + "images/task_00109_65_239_camera_top.jpg", + "images/task_00109_65_244_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_00109_65_186_camera_top.jpg", + "images/task_00109_65_105_camera_top.jpg", + "images/task_00109_65_397_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. release\nC. pre-approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. hold and carry" + } + ], + "images": [ + "images/task_00109_65_156_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_00109_65_200_camera_top.jpg", + "images/task_00109_65_94_camera_top.jpg", + "images/task_00109_65_460_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nC. toward the right side of the scene\nA. 
toward the top of the scene\nB. toward the left side of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. toward the right side of the scene" + } + ], + "images": [ + "images/task_00109_130_242_camera_top.jpg", + "images/task_00109_130_247_camera_top.jpg", + "images/task_00109_130_252_camera_top.jpg", + "images/task_00109_130_257_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000130", + "recording_id": null, + "episode_id": 130, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00109_130_124_camera_top.jpg", + "images/task_00109_130_127_camera_top.jpg", + "images/task_00109_130_130_camera_top.jpg", + "images/task_00109_130_133_camera_top.jpg", + "images/task_00109_130_136_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000130", + "recording_id": null, + "episode_id": 130, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — no contact" + } + ], + "images": [ + "images/task_00109_130_261_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000130", + "recording_id": null, + "episode_id": 130, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00109_130_91_camera_top_t-6.jpg", + "images/task_00109_130_94_camera_top_t-3.jpg", + "images/task_00109_130_97_camera_top_t0.jpg", + "images/task_00109_130_100_camera_top_t+3.jpg", + "images/task_00109_130_103_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000130", + "recording_id": null, + "episode_id": 130, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. hold and carry\nC. contact\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
contact" + } + ], + "images": [ + "images/task_00109_130_77_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000130", + "recording_id": null, + "episode_id": 130, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/task_00109_130_300_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000130", + "recording_id": null, + "episode_id": 130, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00109_130_213_camera_top.jpg", + "images/task_00109_130_216_camera_top.jpg", + "images/task_00109_130_219_camera_top.jpg", + "images/task_00109_130_222_camera_top.jpg", + "images/task_00109_130_225_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000130", + "recording_id": null, + "episode_id": 130, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00109_130_95_camera_top_t-6.jpg", + "images/task_00109_130_98_camera_top_t-3.jpg", + "images/task_00109_130_101_camera_top_t0.jpg", + "images/task_00109_130_104_camera_top_t+3.jpg", + "images/task_00109_130_107_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000130", + "recording_id": null, + "episode_id": 130, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00109_130_169_camera_top_t-6.jpg", + "images/task_00109_130_172_camera_top_t-3.jpg", + "images/task_00109_130_175_camera_top_t0.jpg", + "images/task_00109_130_178_camera_top_t+3.jpg", + "images/task_00109_130_181_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000130", + "recording_id": null, + "episode_id": 130, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00109_130_97_camera_top_t-6.jpg", + "images/task_00109_130_100_camera_top_t-3.jpg", + "images/task_00109_130_103_camera_top_t0.jpg", + "images/task_00109_130_106_camera_top_t+3.jpg", + "images/task_00109_130_109_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000130", + "recording_id": null, + "episode_id": 130, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. release\nC. transfer\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
hold and carry" + } + ], + "images": [ + "images/task_00109_130_105_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000130", + "recording_id": null, + "episode_id": 130, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/task_00109_130_201_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000130", + "recording_id": null, + "episode_id": 130, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_00109_130_10_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000130", + "recording_id": null, + "episode_id": 130, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. pre-approach\nC. approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
transfer" + } + ], + "images": [ + "images/task_00109_130_210_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000130", + "recording_id": null, + "episode_id": 130, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the bottom of the scene\nB. toward the left side of the scene\nC. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. toward the right side of the scene" + } + ], + "images": [ + "images/task_00109_130_104_camera_top.jpg", + "images/task_00109_130_109_camera_top.jpg", + "images/task_00109_130_114_camera_top.jpg", + "images/task_00109_130_119_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000130", + "recording_id": null, + "episode_id": 130, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00109_130_168_camera_top.jpg", + "images/task_00109_130_171_camera_top.jpg", + "images/task_00109_130_174_camera_top.jpg", + "images/task_00109_130_177_camera_top.jpg", + "images/task_00109_130_180_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000130", + "recording_id": null, + "episode_id": 130, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. approach\nC. transfer\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. contact" + } + ], + "images": [ + "images/task_00109_130_85_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000130", + "recording_id": null, + "episode_id": 130, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — contact established" + } + ], + "images": [ + "images/task_00109_130_160_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000130", + "recording_id": null, + "episode_id": 130, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the right side of the scene\nA. toward the left side of the scene\nC. toward the top of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. toward the right side of the scene" + } + ], + "images": [ + "images/task_00109_130_237_camera_top.jpg", + "images/task_00109_130_242_camera_top.jpg", + "images/task_00109_130_247_camera_top.jpg", + "images/task_00109_130_252_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000130", + "recording_id": null, + "episode_id": 130, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. release\nC. hold and carry\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
transfer" + } + ], + "images": [ + "images/task_00109_130_160_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000130", + "recording_id": null, + "episode_id": 130, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00109_130_226_camera_top_t-6.jpg", + "images/task_00109_130_229_camera_top_t-3.jpg", + "images/task_00109_130_232_camera_top_t0.jpg", + "images/task_00109_130_235_camera_top_t+3.jpg", + "images/task_00109_130_238_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000130", + "recording_id": null, + "episode_id": 130, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. release\nC. pre-approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
release" + } + ], + "images": [ + "images/task_00109_130_301_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000130", + "recording_id": null, + "episode_id": 130, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_00109_130_131_camera_top.jpg", + "images/task_00109_130_219_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00109", + "split": "eval", + "split_group_id": "task_00109__episode_000130", + "recording_id": null, + "episode_id": 130, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. release\nC. approach\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
approach" + } + ], + "images": [ + "images/task_00110_0_1171_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. approach\nC. pre-approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. release" + } + ], + "images": [ + "images/task_00110_0_1498_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. hold and carry\nC. release\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. contact" + } + ], + "images": [ + "images/task_00110_0_1182_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. hold and carry\nC. release\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
transfer" + } + ], + "images": [ + "images/task_00110_0_862_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. pre-approach\nC. transfer\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. contact" + } + ], + "images": [ + "images/task_00110_0_1184_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_00110_0_449_camera_top.jpg", + "images/task_00110_0_1087_camera_top.jpg", + "images/task_00110_0_1487_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00110_0_1310_camera_top_t-6.jpg", + "images/task_00110_0_1313_camera_top_t-3.jpg", + "images/task_00110_0_1316_camera_top_t0.jpg", + "images/task_00110_0_1319_camera_top_t+3.jpg", + "images/task_00110_0_1322_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nC. toward the right side of the scene\nA. toward the top of the scene\nB. toward the left side of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. toward the right side of the scene" + } + ], + "images": [ + "images/task_00110_0_1360_camera_top.jpg", + "images/task_00110_0_1365_camera_top.jpg", + "images/task_00110_0_1370_camera_top.jpg", + "images/task_00110_0_1375_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/task_00110_0_583_camera_top.jpg", + "images/task_00110_0_425_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nB. toward the bottom of the scene\nA. toward the right side of the scene\nC. toward the top of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. toward the bottom of the scene" + } + ], + "images": [ + "images/task_00110_0_717_camera_top.jpg", + "images/task_00110_0_722_camera_top.jpg", + "images/task_00110_0_727_camera_top.jpg", + "images/task_00110_0_732_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_00110_0_780_camera_top.jpg", + "images/task_00110_0_1180_camera_top.jpg", + "images/task_00110_0_437_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the left side of the scene\nB. toward the bottom of the scene\nC. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. toward the right side of the scene" + } + ], + "images": [ + "images/task_00110_0_1367_camera_top.jpg", + "images/task_00110_0_1372_camera_top.jpg", + "images/task_00110_0_1377_camera_top.jpg", + "images/task_00110_0_1382_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00110_0_573_camera_top.jpg", + "images/task_00110_0_576_camera_top.jpg", + "images/task_00110_0_579_camera_top.jpg", + "images/task_00110_0_582_camera_top.jpg", + "images/task_00110_0_585_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_00110_0_1353_camera_top.jpg", + "images/task_00110_0_642_camera_top.jpg", + "images/task_00110_0_1174_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00110_0_1095_camera_top_t-6.jpg", + "images/task_00110_0_1098_camera_top_t-3.jpg", + "images/task_00110_0_1101_camera_top_t0.jpg", + "images/task_00110_0_1104_camera_top_t+3.jpg", + "images/task_00110_0_1107_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00110_0_849_camera_top.jpg", + "images/task_00110_0_852_camera_top.jpg", + "images/task_00110_0_855_camera_top.jpg", + "images/task_00110_0_858_camera_top.jpg", + "images/task_00110_0_861_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nB. toward the right side of the scene\nA. toward the top of the scene\nC. toward the bottom of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00110_0_624_camera_top.jpg", + "images/task_00110_0_629_camera_top.jpg", + "images/task_00110_0_634_camera_top.jpg", + "images/task_00110_0_639_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_00110_0_1475_camera_top.jpg", + "images/task_00110_0_580_camera_top.jpg", + "images/task_00110_0_1180_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the left gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/task_00110_0_1002_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the left gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. 
No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/task_00110_0_726_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_00110_0_583_camera_top.jpg", + "images/task_00110_0_441_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/task_00110_0_639_camera_top.jpg", + "images/task_00110_0_472_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. release\nC. hold and carry\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. hold and carry" + } + ], + "images": [ + "images/task_00110_0_488_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. approach\nC. transfer\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. contact" + } + ], + "images": [ + "images/task_00110_0_449_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. contact\nC. transfer\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
transfer" + } + ], + "images": [ + "images/task_00110_0_880_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00110_0_1401_camera_top_t-6.jpg", + "images/task_00110_0_1404_camera_top_t-3.jpg", + "images/task_00110_0_1407_camera_top_t0.jpg", + "images/task_00110_0_1410_camera_top_t+3.jpg", + "images/task_00110_0_1413_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00110_0_1052_camera_top.jpg", + "images/task_00110_0_1055_camera_top.jpg", + "images/task_00110_0_1058_camera_top.jpg", + "images/task_00110_0_1061_camera_top.jpg", + "images/task_00110_0_1064_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the right side of the scene\nA. toward the bottom of the scene\nB. toward the left side of the scene\nC. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. toward the right side of the scene" + } + ], + "images": [ + "images/task_00110_0_1358_camera_top.jpg", + "images/task_00110_0_1363_camera_top.jpg", + "images/task_00110_0_1368_camera_top.jpg", + "images/task_00110_0_1373_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00110_0_756_camera_top_t-6.jpg", + "images/task_00110_0_759_camera_top_t-3.jpg", + "images/task_00110_0_762_camera_top_t0.jpg", + "images/task_00110_0_765_camera_top_t+3.jpg", + "images/task_00110_0_768_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_00110_0_1108_camera_top.jpg", + "images/task_00110_0_1178_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. pre-approach\nC. approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
hold and carry" + } + ], + "images": [ + "images/task_00110_0_469_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. pre-approach\nC. transfer\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. approach" + } + ], + "images": [ + "images/task_00110_0_1168_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nC. toward the bottom of the scene\nA. toward the top of the scene\nB. toward the right side of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
toward the bottom of the scene" + } + ], + "images": [ + "images/task_00110_0_734_camera_top.jpg", + "images/task_00110_0_739_camera_top.jpg", + "images/task_00110_0_744_camera_top.jpg", + "images/task_00110_0_749_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00110_0_1293_camera_top_t-6.jpg", + "images/task_00110_0_1296_camera_top_t-3.jpg", + "images/task_00110_0_1299_camera_top_t0.jpg", + "images/task_00110_0_1299_camera_top_t+3.jpg", + "images/task_00110_0_1305_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nA. toward the top of the scene\nB. toward the bottom of the scene\nC. toward the right side of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
toward the top of the scene" + } + ], + "images": [ + "images/task_00110_0_1199_camera_top.jpg", + "images/task_00110_0_1204_camera_top.jpg", + "images/task_00110_0_1209_camera_top.jpg", + "images/task_00110_0_1214_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_00110_0_520_camera_top.jpg", + "images/task_00110_0_1028_camera_top.jpg", + "images/task_00110_0_1176_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. pre-approach\nC. contact\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. approach" + } + ], + "images": [ + "images/task_00110_0_1174_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. 
The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_00110_0_776_camera_top.jpg", + "images/task_00110_0_563_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nA. toward the bottom of the scene\nB. toward the right side of the scene\nC. toward the top of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. toward the bottom of the scene" + } + ], + "images": [ + "images/task_00110_0_412_camera_top.jpg", + "images/task_00110_0_417_camera_top.jpg", + "images/task_00110_0_422_camera_top.jpg", + "images/task_00110_0_427_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000000", + "recording_id": null, + "episode_id": 0, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — no contact" + } + ], + "images": [ + "images/task_00110_65_727_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00110_65_301_camera_top.jpg", + "images/task_00110_65_304_camera_top.jpg", + "images/task_00110_65_307_camera_top.jpg", + "images/task_00110_65_310_camera_top.jpg", + "images/task_00110_65_313_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. hold and carry\nC. release\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
release" + } + ], + "images": [ + "images/task_00110_65_831_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_00110_65_563_camera_top.jpg", + "images/task_00110_65_414_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00110_65_527_camera_top_t-6.jpg", + "images/task_00110_65_530_camera_top_t-3.jpg", + "images/task_00110_65_533_camera_top_t0.jpg", + "images/task_00110_65_536_camera_top_t+3.jpg", + "images/task_00110_65_539_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. release\nC. hold and carry\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. contact" + } + ], + "images": [ + "images/task_00110_65_4_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the right side of the scene\nA. toward the left side of the scene\nC. toward the top of the scene\nD. toward the bottom of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00110_65_550_camera_top.jpg", + "images/task_00110_65_555_camera_top.jpg", + "images/task_00110_65_560_camera_top.jpg", + "images/task_00110_65_565_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/task_00110_65_620_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nA. toward the bottom of the scene\nB. toward the right side of the scene\nC. toward the top of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
toward the bottom of the scene" + } + ], + "images": [ + "images/task_00110_65_247_camera_top.jpg", + "images/task_00110_65_252_camera_top.jpg", + "images/task_00110_65_257_camera_top.jpg", + "images/task_00110_65_262_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. transfer\nC. contact\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. release" + } + ], + "images": [ + "images/task_00110_65_572_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/task_00110_65_217_camera_top.jpg", + "images/task_00110_65_398_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nB. toward the top of the scene\nA. toward the right side of the scene\nC. toward the bottom of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. toward the top of the scene" + } + ], + "images": [ + "images/task_00110_65_684_camera_top.jpg", + "images/task_00110_65_689_camera_top.jpg", + "images/task_00110_65_694_camera_top.jpg", + "images/task_00110_65_699_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nA. toward the right side of the scene\nB. toward the bottom of the scene\nC. toward the top of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
toward the right side of the scene" + } + ], + "images": [ + "images/task_00110_65_553_camera_top.jpg", + "images/task_00110_65_558_camera_top.jpg", + "images/task_00110_65_563_camera_top.jpg", + "images/task_00110_65_568_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00110_65_343_camera_top.jpg", + "images/task_00110_65_346_camera_top.jpg", + "images/task_00110_65_349_camera_top.jpg", + "images/task_00110_65_352_camera_top.jpg", + "images/task_00110_65_355_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00110_65_261_camera_top_t-6.jpg", + "images/task_00110_65_264_camera_top_t-3.jpg", + "images/task_00110_65_267_camera_top_t0.jpg", + "images/task_00110_65_270_camera_top_t+3.jpg", + "images/task_00110_65_273_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. hold and carry\nC. release\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. transfer" + } + ], + "images": [ + "images/task_00110_65_400_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. transfer\nC. release\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
hold and carry" + } + ], + "images": [ + "images/task_00110_65_209_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/task_00110_65_266_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_00110_65_750_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. release\nC. contact\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
contact" + } + ], + "images": [ + "images/task_00110_65_1_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. hold and carry\nC. transfer\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. contact" + } + ], + "images": [ + "images/task_00110_65_3_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/task_00110_65_293_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00110_65_592_camera_top_t-6.jpg", + "images/task_00110_65_595_camera_top_t-3.jpg", + "images/task_00110_65_598_camera_top_t0.jpg", + "images/task_00110_65_601_camera_top_t+3.jpg", + "images/task_00110_65_604_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00110_65_91_camera_top.jpg", + "images/task_00110_65_94_camera_top.jpg", + "images/task_00110_65_97_camera_top.jpg", + "images/task_00110_65_100_camera_top.jpg", + "images/task_00110_65_103_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nD. toward the top of the scene\nA. toward the left side of the scene\nB. toward the bottom of the scene\nC. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
toward the top of the scene" + } + ], + "images": [ + "images/task_00110_65_685_camera_top.jpg", + "images/task_00110_65_690_camera_top.jpg", + "images/task_00110_65_695_camera_top.jpg", + "images/task_00110_65_700_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00110_65_103_camera_top_t-6.jpg", + "images/task_00110_65_106_camera_top_t-3.jpg", + "images/task_00110_65_109_camera_top_t0.jpg", + "images/task_00110_65_112_camera_top_t+3.jpg", + "images/task_00110_65_115_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. transfer\nC. contact\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
contact" + } + ], + "images": [ + "images/task_00110_65_2_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the right gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/task_00110_65_414_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. transfer\nC. hold and carry\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. transfer" + } + ], + "images": [ + "images/task_00110_65_449_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nC. toward the bottom of the scene\nA. toward the top of the scene\nB. toward the left side of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
toward the bottom of the scene" + } + ], + "images": [ + "images/task_00110_65_244_camera_top.jpg", + "images/task_00110_65_249_camera_top.jpg", + "images/task_00110_65_254_camera_top.jpg", + "images/task_00110_65_259_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000065", + "recording_id": null, + "episode_id": 65, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. pre-approach\nC. transfer\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. release" + } + ], + "images": [ + "images/task_00110_129_1116_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. approach\nC. transfer\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. transfer" + } + ], + "images": [ + "images/task_00110_129_623_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_00110_129_879_camera_top.jpg", + "images/task_00110_129_302_camera_top.jpg", + "images/task_00110_129_201_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/task_00110_129_545_camera_top.jpg", + "images/task_00110_129_548_camera_top.jpg", + "images/task_00110_129_551_camera_top.jpg", + "images/task_00110_129_554_camera_top.jpg", + "images/task_00110_129_557_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/task_00110_129_767_camera_top.jpg", + "images/task_00110_129_770_camera_top.jpg", + "images/task_00110_129_773_camera_top.jpg", + "images/task_00110_129_776_camera_top.jpg", + "images/task_00110_129_779_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. release\nC. pre-approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. release" + } + ], + "images": [ + "images/task_00110_129_1200_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. contact\nC. approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
hold and carry" + } + ], + "images": [ + "images/task_00110_129_326_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. hold and carry\nC. transfer\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. release" + } + ], + "images": [ + "images/task_00110_129_971_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the left gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_00110_129_1127_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00110_129_424_camera_top_t-6.jpg", + "images/task_00110_129_427_camera_top_t-3.jpg", + "images/task_00110_129_430_camera_top_t0.jpg", + "images/task_00110_129_433_camera_top_t+3.jpg", + "images/task_00110_129_436_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nC. toward the top of the scene\nA. toward the right side of the scene\nB. toward the bottom of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. toward the top of the scene" + } + ], + "images": [ + "images/task_00110_129_1106_camera_top.jpg", + "images/task_00110_129_1111_camera_top.jpg", + "images/task_00110_129_1116_camera_top.jpg", + "images/task_00110_129_1121_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nC. toward the top of the scene\nA. toward the left side of the scene\nB. toward the bottom of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
toward the top of the scene" + } + ], + "images": [ + "images/task_00110_129_233_camera_top.jpg", + "images/task_00110_129_238_camera_top.jpg", + "images/task_00110_129_243_camera_top.jpg", + "images/task_00110_129_248_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the left gripper currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_00110_129_1234_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T2", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00110_129_663_camera_top_t-6.jpg", + "images/task_00110_129_666_camera_top_t-3.jpg", + "images/task_00110_129_669_camera_top_t0.jpg", + "images/task_00110_129_672_camera_top_t+3.jpg", + "images/task_00110_129_675_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. pre-approach\nC. release\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. release" + } + ], + "images": [ + "images/task_00110_129_1031_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00110_129_962_camera_top_t-6.jpg", + "images/task_00110_129_965_camera_top_t-3.jpg", + "images/task_00110_129_968_camera_top_t0.jpg", + "images/task_00110_129_971_camera_top_t+3.jpg", + "images/task_00110_129_974_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_00110_129_291_camera_top.jpg", + "images/task_00110_129_225_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_00110_129_718_camera_top.jpg", + "images/task_00110_129_223_camera_top.jpg", + "images/task_00110_129_264_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nB. toward the top of the scene\nA. toward the bottom of the scene\nC. toward the right side of the scene\nD. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. toward the top of the scene" + } + ], + "images": [ + "images/task_00110_129_1108_camera_top.jpg", + "images/task_00110_129_1113_camera_top.jpg", + "images/task_00110_129_1118_camera_top.jpg", + "images/task_00110_129_1123_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nA. toward the left side of the scene\nB. toward the bottom of the scene\nC. toward the right side of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
toward the left side of the scene" + } + ], + "images": [ + "images/task_00110_129_294_camera_top.jpg", + "images/task_00110_129_299_camera_top.jpg", + "images/task_00110_129_304_camera_top.jpg", + "images/task_00110_129_309_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nA. toward the left side of the scene\nB. toward the top of the scene\nC. toward the bottom of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. toward the left side of the scene" + } + ], + "images": [ + "images/task_00110_129_304_camera_top.jpg", + "images/task_00110_129_309_camera_top.jpg", + "images/task_00110_129_314_camera_top.jpg", + "images/task_00110_129_319_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/task_00110_129_404_camera_top.jpg", + "images/task_00110_129_213_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/task_00110_129_400_camera_top.jpg", + "images/task_00110_129_403_camera_top.jpg", + "images/task_00110_129_406_camera_top.jpg", + "images/task_00110_129_409_camera_top.jpg", + "images/task_00110_129_412_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_progress", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/task_00110_129_224_camera_top.jpg", + "images/task_00110_129_294_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. contact\nC. transfer\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. contact" + } + ], + "images": [ + "images/task_00110_129_225_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nD. toward the left side of the scene\nA. toward the bottom of the scene\nB. toward the right side of the scene\nC. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
toward the left side of the scene" + } + ], + "images": [ + "images/task_00110_129_297_camera_top.jpg", + "images/task_00110_129_302_camera_top.jpg", + "images/task_00110_129_307_camera_top.jpg", + "images/task_00110_129_312_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_00110_129_1068_camera_top_t-6.jpg", + "images/task_00110_129_1071_camera_top_t-3.jpg", + "images/task_00110_129_1074_camera_top_t0.jpg", + "images/task_00110_129_1077_camera_top_t+3.jpg", + "images/task_00110_129_1080_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/task_00110_129_225_camera_top.jpg", + "images/task_00110_129_424_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nA. toward the right side of the scene\nB. toward the bottom of the scene\nC. toward the left side of the scene\nD. toward the top of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. toward the right side of the scene" + } + ], + "images": [ + "images/task_00110_129_974_camera_top.jpg", + "images/task_00110_129_979_camera_top.jpg", + "images/task_00110_129_984_camera_top.jpg", + "images/task_00110_129_989_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. hold and carry\nC. transfer\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
hold and carry" + } + ], + "images": [ + "images/task_00110_129_336_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. approach\nC. release\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. pre-approach" + } + ], + "images": [ + "images/task_00110_129_179_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_00110_129_417_camera_top.jpg", + "images/task_00110_129_197_camera_top.jpg", + "images/task_00110_129_281_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. transfer\nC. hold and carry\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
transfer" + } + ], + "images": [ + "images/task_00110_129_903_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T1", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_00110_129_941_camera_top_t-6.jpg", + "images/task_00110_129_944_camera_top_t-3.jpg", + "images/task_00110_129_947_camera_top_t0.jpg", + "images/task_00110_129_950_camera_top_t+3.jpg", + "images/task_00110_129_953_camera_top_t+6.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T6", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation episode. The panel placement and the labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/task_00110_129_227_camera_top.jpg", + "images/task_00110_129_365_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_binary", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move in the scene?\nOptions:\nB. toward the left side of the scene\nA. toward the top of the scene\nC. toward the bottom of the scene\nD. toward the right side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. toward the left side of the scene" + } + ], + "images": [ + "images/task_00110_129_302_camera_top.jpg", + "images/task_00110_129_307_camera_top.jpg", + "images/task_00110_129_312_camera_top.jpg", + "images/task_00110_129_317_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move in the scene?\nOptions:\nD. toward the top of the scene\nA. toward the bottom of the scene\nB. toward the right side of the scene\nC. toward the left side of the scene\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
toward the top of the scene" + } + ], + "images": [ + "images/task_00110_129_257_camera_top.jpg", + "images/task_00110_129_262_camera_top.jpg", + "images/task_00110_129_267_camera_top.jpg", + "images/task_00110_129_272_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T3", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_00110_129_221_camera_top.jpg", + "images/task_00110_129_322_camera_top.jpg", + "images/task_00110_129_1137_camera_top.jpg" + ], + "dataset": "GM100", + "source_dataset": "GM100", + "task_type": "T_temporal", + "task_id": "task_00110", + "split": "eval", + "split_group_id": "task_00110__episode_000129", + "recording_id": null, + "episode_id": 129, + "camera": "camera_top", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — no contact" + } + ], + "images": [ + "images/task_0002_user_0007_scene_0005_cfg_0002_98_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0002_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/task_0002_user_0007_scene_0005_cfg_0002_116_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0002_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/task_0002_user_0007_scene_0005_cfg_0002_134_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0002_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. 
Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/task_0002_user_0007_scene_0005_cfg_0002_158_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0002_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/task_0002_user_0007_scene_0005_cfg_0002_176_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0002_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — contact established" + } + ], + "images": [ + "images/task_0002_user_0007_scene_0005_cfg_0002_194_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0002_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/task_0002_user_0007_scene_0005_cfg_0002_222_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0002_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — contact established" + } + ], + "images": [ + "images/task_0002_user_0007_scene_0005_cfg_0002_240_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0002_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/task_0002_user_0007_scene_0005_cfg_0002_275_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0002_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Up\nC. Left\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Up" + } + ], + "images": [ + "images/task_0002_user_0007_scene_0005_cfg_0002_40_036422060215.jpg", + "images/task_0002_user_0007_scene_0005_cfg_0002_50_036422060215.jpg", + "images/task_0002_user_0007_scene_0005_cfg_0002_60_036422060215.jpg", + "images/task_0002_user_0007_scene_0005_cfg_0002_70_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0002_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Right\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Up" + } + ], + "images": [ + "images/task_0002_user_0007_scene_0005_cfg_0002_50_036422060215.jpg", + "images/task_0002_user_0007_scene_0005_cfg_0002_60_036422060215.jpg", + "images/task_0002_user_0007_scene_0005_cfg_0002_70_036422060215.jpg", + "images/task_0002_user_0007_scene_0005_cfg_0002_80_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0002_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Down\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Up" + } + ], + "images": [ + "images/task_0002_user_0007_scene_0005_cfg_0002_60_036422060215.jpg", + "images/task_0002_user_0007_scene_0005_cfg_0002_70_036422060215.jpg", + "images/task_0002_user_0007_scene_0005_cfg_0002_80_036422060215.jpg", + "images/task_0002_user_0007_scene_0005_cfg_0002_90_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0002_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Left\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Left" + } + ], + "images": [ + "images/task_0002_user_0007_scene_0005_cfg_0002_230_036422060215.jpg", + "images/task_0002_user_0007_scene_0005_cfg_0002_240_036422060215.jpg", + "images/task_0002_user_0007_scene_0005_cfg_0002_250_036422060215.jpg", + "images/task_0002_user_0007_scene_0005_cfg_0002_260_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0002_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Right\nC. Down\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Left" + } + ], + "images": [ + "images/task_0002_user_0007_scene_0005_cfg_0002_240_036422060215.jpg", + "images/task_0002_user_0007_scene_0005_cfg_0002_250_036422060215.jpg", + "images/task_0002_user_0007_scene_0005_cfg_0002_260_036422060215.jpg", + "images/task_0002_user_0007_scene_0005_cfg_0002_270_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0002_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Left\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Down" + } + ], + "images": [ + "images/task_0002_user_0007_scene_0005_cfg_0002_260_036422060215.jpg", + "images/task_0002_user_0007_scene_0005_cfg_0002_270_036422060215.jpg", + "images/task_0002_user_0007_scene_0005_cfg_0002_280_036422060215.jpg", + "images/task_0002_user_0007_scene_0005_cfg_0002_290_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0002_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0002_user_0007_scene_0005_cfg_0002_110_036422060215.jpg", + "images/task_0002_user_0007_scene_0005_cfg_0002_113_036422060215.jpg", + "images/task_0002_user_0007_scene_0005_cfg_0002_116_036422060215.jpg", + "images/task_0002_user_0007_scene_0005_cfg_0002_119_036422060215.jpg", + "images/task_0002_user_0007_scene_0005_cfg_0002_122_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0002_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0002_user_0007_scene_0005_cfg_0002_232_036422060215.jpg", + "images/task_0002_user_0007_scene_0005_cfg_0002_235_036422060215.jpg", + "images/task_0002_user_0007_scene_0005_cfg_0002_238_036422060215.jpg", + "images/task_0002_user_0007_scene_0005_cfg_0002_241_036422060215.jpg", + "images/task_0002_user_0007_scene_0005_cfg_0002_244_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0002_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0002_user_0007_scene_0005_cfg_0002_250_036422060215.jpg", + "images/task_0002_user_0007_scene_0005_cfg_0002_253_036422060215.jpg", + "images/task_0002_user_0007_scene_0005_cfg_0002_256_036422060215.jpg", + "images/task_0002_user_0007_scene_0005_cfg_0002_259_036422060215.jpg", + "images/task_0002_user_0007_scene_0005_cfg_0002_262_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0002_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0002_user_0007_scene_0005_cfg_0002_7_036422060215.jpg", + "images/task_0002_user_0007_scene_0005_cfg_0002_17_036422060215.jpg", + "images/task_0002_user_0007_scene_0005_cfg_0002_27_036422060215.jpg", + "images/task_0002_user_0007_scene_0005_cfg_0002_37_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0002_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. 
Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0002_user_0007_scene_0005_cfg_0002_27_036422060215.jpg", + "images/task_0002_user_0007_scene_0005_cfg_0002_37_036422060215.jpg", + "images/task_0002_user_0007_scene_0005_cfg_0002_47_036422060215.jpg", + "images/task_0002_user_0007_scene_0005_cfg_0002_57_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0002_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0002_user_0007_scene_0005_cfg_0002_47_036422060215.jpg", + "images/task_0002_user_0007_scene_0005_cfg_0002_57_036422060215.jpg", + "images/task_0002_user_0007_scene_0005_cfg_0002_67_036422060215.jpg", + "images/task_0002_user_0007_scene_0005_cfg_0002_77_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0002_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. 
Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0002_user_0007_scene_0005_cfg_0002_67_036422060215.jpg", + "images/task_0002_user_0007_scene_0005_cfg_0002_77_036422060215.jpg", + "images/task_0002_user_0007_scene_0005_cfg_0002_87_036422060215.jpg", + "images/task_0002_user_0007_scene_0005_cfg_0002_97_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0002_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0002_user_0007_scene_0005_cfg_0002_87_036422060215.jpg", + "images/task_0002_user_0007_scene_0005_cfg_0002_97_036422060215.jpg", + "images/task_0002_user_0007_scene_0005_cfg_0002_107_036422060215.jpg", + "images/task_0002_user_0007_scene_0005_cfg_0002_117_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0002_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. 
No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0002_user_0007_scene_0005_cfg_0002_107_036422060215.jpg", + "images/task_0002_user_0007_scene_0005_cfg_0002_117_036422060215.jpg", + "images/task_0002_user_0007_scene_0005_cfg_0002_127_036422060215.jpg", + "images/task_0002_user_0007_scene_0005_cfg_0002_137_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0002_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0002_user_0007_scene_0005_cfg_0002_248_036422060215.jpg", + "images/task_0002_user_0007_scene_0005_cfg_0002_190_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0002_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0002_user_0007_scene_0005_cfg_0002_190_036422060215.jpg", + "images/task_0002_user_0007_scene_0005_cfg_0002_193_036422060215.jpg", + "images/task_0002_user_0007_scene_0005_cfg_0002_196_036422060215.jpg", + "images/task_0002_user_0007_scene_0005_cfg_0002_199_036422060215.jpg", + "images/task_0002_user_0007_scene_0005_cfg_0002_202_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0002_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0002_user_0007_scene_0005_cfg_0002_213_036422060215.jpg", + "images/task_0002_user_0007_scene_0005_cfg_0002_216_036422060215.jpg", + "images/task_0002_user_0007_scene_0005_cfg_0002_219_036422060215.jpg", + "images/task_0002_user_0007_scene_0005_cfg_0002_222_036422060215.jpg", + "images/task_0002_user_0007_scene_0005_cfg_0002_225_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0002_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0002_user_0007_scene_0005_cfg_0002_236_036422060215.jpg", + "images/task_0002_user_0007_scene_0005_cfg_0002_239_036422060215.jpg", + "images/task_0002_user_0007_scene_0005_cfg_0002_242_036422060215.jpg", + "images/task_0002_user_0007_scene_0005_cfg_0002_245_036422060215.jpg", + "images/task_0002_user_0007_scene_0005_cfg_0002_248_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0002_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_0002_user_0007_scene_0005_cfg_0002_219_036422060215.jpg", + "images/task_0002_user_0007_scene_0005_cfg_0002_188_036422060215.jpg", + "images/task_0002_user_0007_scene_0005_cfg_0002_250_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0002_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — no contact" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0008_cfg_0002_71_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0008_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0008_cfg_0002_89_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0008_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0008_cfg_0002_107_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0008_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. 
No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0008_cfg_0002_131_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0008_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0008_cfg_0002_149_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0008_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — contact established" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0008_cfg_0002_162_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0008_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0008_cfg_0002_185_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0008_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Down\nC. Left\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Up" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0008_cfg_0002_100_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_110_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_120_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_130_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0008_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Left\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Up" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0008_cfg_0002_110_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_120_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_130_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_140_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0008_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Left\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Up" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0008_cfg_0002_120_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_130_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_140_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_150_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0008_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Up\nC. Right\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Down" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0008_cfg_0002_160_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_170_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_180_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_190_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0008_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Right\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Right" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0008_cfg_0002_170_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_180_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_190_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_200_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0008_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Right" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0008_cfg_0002_180_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_190_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_200_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_210_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0008_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Right\nC. Down\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Right" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0008_cfg_0002_190_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_200_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_210_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_220_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0008_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0008_cfg_0002_65_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_68_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_71_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_74_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_77_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0008_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. 
Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0008_cfg_0002_83_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_86_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_89_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_92_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_95_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0008_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0008_cfg_0002_101_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_104_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_107_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_110_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_113_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0008_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0008_cfg_0002_125_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_128_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_131_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_134_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_137_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0008_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0008_cfg_0002_179_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_182_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_185_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_188_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_191_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0008_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0008_cfg_0002_197_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_200_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_203_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_206_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_209_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0008_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0008_cfg_0002_5_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_12_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_19_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_26_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0008_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. 
Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0008_cfg_0002_19_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_26_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_33_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_40_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0008_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0008_cfg_0002_33_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_40_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_47_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_54_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0008_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. 
No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0008_cfg_0002_47_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_54_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_61_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_68_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0008_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0008_cfg_0002_61_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_68_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_75_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_82_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0008_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. 
Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0008_cfg_0002_75_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_82_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_89_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_96_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0008_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0008_cfg_0002_153_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_193_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0008_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0008_cfg_0002_151_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_154_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_157_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_160_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_163_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0008_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0008_cfg_0002_167_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_170_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_173_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_176_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_179_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0008_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0008_cfg_0002_183_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_186_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_189_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_192_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_195_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0008_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0008_cfg_0002_173_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_195_036422060215.jpg", + "images/task_0002_user_0013_scene_0008_cfg_0002_151_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0008_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — no contact" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0009_cfg_0002_91_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0009_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0009_cfg_0002_109_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0009_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0009_cfg_0002_127_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0009_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. 
No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0009_cfg_0002_150_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0009_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0009_cfg_0002_286_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0009_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — contact established" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0009_cfg_0002_304_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0009_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0009_cfg_0002_361_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0009_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — contact established" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0009_cfg_0002_362_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0009_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0009_cfg_0002_380_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0009_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Down\nC. Up\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Left" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0009_cfg_0002_40_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_50_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_60_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_70_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0009_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Right\nC. Down\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Left" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0009_cfg_0002_50_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_60_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_70_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_80_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0009_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Down\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Up" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0009_cfg_0002_60_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_70_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_80_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_90_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0009_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Left\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Up" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0009_cfg_0002_70_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_80_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_90_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_100_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0009_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Left\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Up" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0009_cfg_0002_80_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_90_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_100_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_110_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0009_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Right\nC. Left\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Left" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0009_cfg_0002_120_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_130_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_140_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_150_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0009_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Down" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0009_cfg_0002_260_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_270_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_280_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_290_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0009_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Left\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Right" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0009_cfg_0002_350_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_360_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_370_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_380_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0009_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Down\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Right" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0009_cfg_0002_360_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_370_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_380_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_390_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0009_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Right\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Right" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0009_cfg_0002_370_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_380_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_390_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_400_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0009_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0009_cfg_0002_121_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_124_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_127_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_130_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_133_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0009_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0009_cfg_0002_144_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_147_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_150_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_153_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_156_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0009_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0009_cfg_0002_355_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_358_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_361_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_364_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_367_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0009_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0009_cfg_0002_373_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_376_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_379_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_382_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_385_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0009_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0009_cfg_0002_8_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_21_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_34_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_47_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0009_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. 
No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0009_cfg_0002_34_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_47_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_60_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_73_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0009_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0009_cfg_0002_60_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_73_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_86_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_99_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0009_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. 
Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0009_cfg_0002_86_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_99_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_112_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_125_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0009_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0009_cfg_0002_112_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_125_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_138_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_151_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0009_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. 
Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0009_cfg_0002_138_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_151_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_164_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_177_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0009_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0009_cfg_0002_159_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_172_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0009_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0009_cfg_0002_172_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_369_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0009_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0009_cfg_0002_379_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_369_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0009_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0009_cfg_0002_154_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_157_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_160_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_163_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_166_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0009_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0009_cfg_0002_160_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_163_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_166_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_169_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_172_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0009_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0009_cfg_0002_165_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_168_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_171_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_174_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_177_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0009_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0009_cfg_0002_364_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_367_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_370_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_373_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_376_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0009_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0009_cfg_0002_368_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_371_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_374_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_377_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_380_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0009_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0002_user_0013_scene_0009_cfg_0002_372_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_375_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_378_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_381_036422060215.jpg", + "images/task_0002_user_0013_scene_0009_cfg_0002_384_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0002", + "split": "eval", + "split_group_id": "task_0002_user_0013_scene_0009_cfg_0002", + "recording_id": "task_0002_user_0013_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_0003_user_0013_scene_0004_cfg_0002_53_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0003", + "split": "eval", + "split_group_id": "task_0003_user_0013_scene_0004_cfg_0002", + "recording_id": "task_0003_user_0013_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — no contact" + } + ], + "images": [ + "images/task_0003_user_0013_scene_0004_cfg_0002_71_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0003", + "split": "eval", + "split_group_id": "task_0003_user_0013_scene_0004_cfg_0002", + "recording_id": "task_0003_user_0013_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_0003_user_0013_scene_0004_cfg_0002_89_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0003", + "split": "eval", + "split_group_id": "task_0003_user_0013_scene_0004_cfg_0002", + "recording_id": "task_0003_user_0013_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_0003_user_0013_scene_0004_cfg_0002_113_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0003", + "split": "eval", + "split_group_id": "task_0003_user_0013_scene_0004_cfg_0002", + "recording_id": "task_0003_user_0013_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. 
No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/task_0003_user_0013_scene_0004_cfg_0002_361_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0003", + "split": "eval", + "split_group_id": "task_0003_user_0013_scene_0004_cfg_0002", + "recording_id": "task_0003_user_0013_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/task_0003_user_0013_scene_0004_cfg_0002_458_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0003", + "split": "eval", + "split_group_id": "task_0003_user_0013_scene_0004_cfg_0002", + "recording_id": "task_0003_user_0013_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — no contact" + } + ], + "images": [ + "images/task_0003_user_0013_scene_0004_cfg_0002_569_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0003", + "split": "eval", + "split_group_id": "task_0003_user_0013_scene_0004_cfg_0002", + "recording_id": "task_0003_user_0013_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/task_0003_user_0013_scene_0004_cfg_0002_573_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0003", + "split": "eval", + "split_group_id": "task_0003_user_0013_scene_0004_cfg_0002", + "recording_id": "task_0003_user_0013_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — contact established" + } + ], + "images": [ + "images/task_0003_user_0013_scene_0004_cfg_0002_611_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0003", + "split": "eval", + "split_group_id": "task_0003_user_0013_scene_0004_cfg_0002", + "recording_id": "task_0003_user_0013_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Up" + } + ], + "images": [ + "images/task_0003_user_0013_scene_0004_cfg_0002_90_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_100_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_110_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_120_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0003", + "split": "eval", + "split_group_id": "task_0003_user_0013_scene_0004_cfg_0002", + "recording_id": "task_0003_user_0013_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Up\nC. Down\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Up" + } + ], + "images": [ + "images/task_0003_user_0013_scene_0004_cfg_0002_100_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_110_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_120_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_130_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0003", + "split": "eval", + "split_group_id": "task_0003_user_0013_scene_0004_cfg_0002", + "recording_id": "task_0003_user_0013_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Up" + } + ], + "images": [ + "images/task_0003_user_0013_scene_0004_cfg_0002_110_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_120_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_130_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_140_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0003", + "split": "eval", + "split_group_id": "task_0003_user_0013_scene_0004_cfg_0002", + "recording_id": "task_0003_user_0013_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Down\nC. Left\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Right" + } + ], + "images": [ + "images/task_0003_user_0013_scene_0004_cfg_0002_150_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_160_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_170_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_180_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0003", + "split": "eval", + "split_group_id": "task_0003_user_0013_scene_0004_cfg_0002", + "recording_id": "task_0003_user_0013_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Right" + } + ], + "images": [ + "images/task_0003_user_0013_scene_0004_cfg_0002_160_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_170_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_180_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_190_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0003", + "split": "eval", + "split_group_id": "task_0003_user_0013_scene_0004_cfg_0002", + "recording_id": "task_0003_user_0013_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Left" + } + ], + "images": [ + "images/task_0003_user_0013_scene_0004_cfg_0002_380_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_390_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_400_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_410_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0003", + "split": "eval", + "split_group_id": "task_0003_user_0013_scene_0004_cfg_0002", + "recording_id": "task_0003_user_0013_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Left\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Left" + } + ], + "images": [ + "images/task_0003_user_0013_scene_0004_cfg_0002_390_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_400_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_410_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_420_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0003", + "split": "eval", + "split_group_id": "task_0003_user_0013_scene_0004_cfg_0002", + "recording_id": "task_0003_user_0013_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Left\nC. Down\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Left" + } + ], + "images": [ + "images/task_0003_user_0013_scene_0004_cfg_0002_400_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_410_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_420_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_430_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0003", + "split": "eval", + "split_group_id": "task_0003_user_0013_scene_0004_cfg_0002", + "recording_id": "task_0003_user_0013_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Right\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Down" + } + ], + "images": [ + "images/task_0003_user_0013_scene_0004_cfg_0002_590_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_600_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_610_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_620_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0003", + "split": "eval", + "split_group_id": "task_0003_user_0013_scene_0004_cfg_0002", + "recording_id": "task_0003_user_0013_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Left\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Down" + } + ], + "images": [ + "images/task_0003_user_0013_scene_0004_cfg_0002_600_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_610_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_620_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_630_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0003", + "split": "eval", + "split_group_id": "task_0003_user_0013_scene_0004_cfg_0002", + "recording_id": "task_0003_user_0013_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Down" + } + ], + "images": [ + "images/task_0003_user_0013_scene_0004_cfg_0002_610_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_620_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_630_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_640_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0003", + "split": "eval", + "split_group_id": "task_0003_user_0013_scene_0004_cfg_0002", + "recording_id": "task_0003_user_0013_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0003_user_0013_scene_0004_cfg_0002_47_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_50_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_53_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_56_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_59_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0003", + "split": "eval", + "split_group_id": "task_0003_user_0013_scene_0004_cfg_0002", + "recording_id": "task_0003_user_0013_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0003_user_0013_scene_0004_cfg_0002_83_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_86_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_89_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_92_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_95_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0003", + "split": "eval", + "split_group_id": "task_0003_user_0013_scene_0004_cfg_0002", + "recording_id": "task_0003_user_0013_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0003_user_0013_scene_0004_cfg_0002_107_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_110_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_113_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_116_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_119_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0003", + "split": "eval", + "split_group_id": "task_0003_user_0013_scene_0004_cfg_0002", + "recording_id": "task_0003_user_0013_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0003_user_0013_scene_0004_cfg_0002_363_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_366_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_369_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_372_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_375_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0003", + "split": "eval", + "split_group_id": "task_0003_user_0013_scene_0004_cfg_0002", + "recording_id": "task_0003_user_0013_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0003_user_0013_scene_0004_cfg_0002_438_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_441_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_444_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_447_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_450_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0003", + "split": "eval", + "split_group_id": "task_0003_user_0013_scene_0004_cfg_0002", + "recording_id": "task_0003_user_0013_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0003_user_0013_scene_0004_cfg_0002_608_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_611_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_614_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_617_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_620_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0003", + "split": "eval", + "split_group_id": "task_0003_user_0013_scene_0004_cfg_0002", + "recording_id": "task_0003_user_0013_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0003_user_0013_scene_0004_cfg_0002_13_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_33_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_53_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_73_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0003", + "split": "eval", + "split_group_id": "task_0003_user_0013_scene_0004_cfg_0002", + "recording_id": "task_0003_user_0013_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. 
Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0003_user_0013_scene_0004_cfg_0002_53_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_73_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_93_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_113_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0003", + "split": "eval", + "split_group_id": "task_0003_user_0013_scene_0004_cfg_0002", + "recording_id": "task_0003_user_0013_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0003_user_0013_scene_0004_cfg_0002_93_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_113_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_133_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_153_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0003", + "split": "eval", + "split_group_id": "task_0003_user_0013_scene_0004_cfg_0002", + "recording_id": "task_0003_user_0013_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. 
No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0003_user_0013_scene_0004_cfg_0002_133_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_153_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_173_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_193_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0003", + "split": "eval", + "split_group_id": "task_0003_user_0013_scene_0004_cfg_0002", + "recording_id": "task_0003_user_0013_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0003_user_0013_scene_0004_cfg_0002_173_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_193_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_213_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_233_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0003", + "split": "eval", + "split_group_id": "task_0003_user_0013_scene_0004_cfg_0002", + "recording_id": "task_0003_user_0013_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. 
Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0003_user_0013_scene_0004_cfg_0002_213_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_233_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_253_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_273_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0003", + "split": "eval", + "split_group_id": "task_0003_user_0013_scene_0004_cfg_0002", + "recording_id": "task_0003_user_0013_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0003_user_0013_scene_0004_cfg_0002_122_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_136_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0003", + "split": "eval", + "split_group_id": "task_0003_user_0013_scene_0004_cfg_0002", + "recording_id": "task_0003_user_0013_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0003_user_0013_scene_0004_cfg_0002_617_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_585_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0003", + "split": "eval", + "split_group_id": "task_0003_user_0013_scene_0004_cfg_0002", + "recording_id": "task_0003_user_0013_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0003_user_0013_scene_0004_cfg_0002_136_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_398_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0003", + "split": "eval", + "split_group_id": "task_0003_user_0013_scene_0004_cfg_0002", + "recording_id": "task_0003_user_0013_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0003_user_0013_scene_0004_cfg_0002_465_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_585_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0003", + "split": "eval", + "split_group_id": "task_0003_user_0013_scene_0004_cfg_0002", + "recording_id": "task_0003_user_0013_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0003_user_0013_scene_0004_cfg_0002_398_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_465_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0003", + "split": "eval", + "split_group_id": "task_0003_user_0013_scene_0004_cfg_0002", + "recording_id": "task_0003_user_0013_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0003_user_0013_scene_0004_cfg_0002_117_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_120_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_123_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_126_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_129_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0003", + "split": "eval", + "split_group_id": "task_0003_user_0013_scene_0004_cfg_0002", + "recording_id": "task_0003_user_0013_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0003_user_0013_scene_0004_cfg_0002_123_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_126_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_129_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_132_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_135_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0003", + "split": "eval", + "split_group_id": "task_0003_user_0013_scene_0004_cfg_0002", + "recording_id": "task_0003_user_0013_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0003_user_0013_scene_0004_cfg_0002_129_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_132_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_135_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_138_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_141_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0003", + "split": "eval", + "split_group_id": "task_0003_user_0013_scene_0004_cfg_0002", + "recording_id": "task_0003_user_0013_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0003_user_0013_scene_0004_cfg_0002_399_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_402_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_405_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_408_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_411_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0003", + "split": "eval", + "split_group_id": "task_0003_user_0013_scene_0004_cfg_0002", + "recording_id": "task_0003_user_0013_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0003_user_0013_scene_0004_cfg_0002_426_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_429_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_432_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_435_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_438_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0003", + "split": "eval", + "split_group_id": "task_0003_user_0013_scene_0004_cfg_0002", + "recording_id": "task_0003_user_0013_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0003_user_0013_scene_0004_cfg_0002_452_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_455_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_458_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_461_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_464_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0003", + "split": "eval", + "split_group_id": "task_0003_user_0013_scene_0004_cfg_0002", + "recording_id": "task_0003_user_0013_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0003_user_0013_scene_0004_cfg_0002_583_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_586_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_589_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_592_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_595_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0003", + "split": "eval", + "split_group_id": "task_0003_user_0013_scene_0004_cfg_0002", + "recording_id": "task_0003_user_0013_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0003_user_0013_scene_0004_cfg_0002_595_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_598_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_601_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_604_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_607_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0003", + "split": "eval", + "split_group_id": "task_0003_user_0013_scene_0004_cfg_0002", + "recording_id": "task_0003_user_0013_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0003_user_0013_scene_0004_cfg_0002_607_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_610_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_613_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_616_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_619_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0003", + "split": "eval", + "split_group_id": "task_0003_user_0013_scene_0004_cfg_0002", + "recording_id": "task_0003_user_0013_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_0003_user_0013_scene_0004_cfg_0002_601_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_584_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_618_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0003", + "split": "eval", + "split_group_id": "task_0003_user_0013_scene_0004_cfg_0002", + "recording_id": "task_0003_user_0013_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_0003_user_0013_scene_0004_cfg_0002_468_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_432_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_395_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0003", + "split": "eval", + "split_group_id": "task_0003_user_0013_scene_0004_cfg_0002", + "recording_id": "task_0003_user_0013_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_0003_user_0013_scene_0004_cfg_0002_137_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_129_036422060215.jpg", + "images/task_0003_user_0013_scene_0004_cfg_0002_121_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0003", + "split": "eval", + "split_group_id": "task_0003_user_0013_scene_0004_cfg_0002", + "recording_id": "task_0003_user_0013_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Up\nC. Left\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Right" + } + ], + "images": [ + "images/task_0006_user_0013_scene_0009_cfg_0002_30_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_40_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_50_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_60_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0006", + "split": "eval", + "split_group_id": "task_0006_user_0013_scene_0009_cfg_0002", + "recording_id": "task_0006_user_0013_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Down\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Right" + } + ], + "images": [ + "images/task_0006_user_0013_scene_0009_cfg_0002_40_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_50_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_60_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_70_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0006", + "split": "eval", + "split_group_id": "task_0006_user_0013_scene_0009_cfg_0002", + "recording_id": "task_0006_user_0013_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Left\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Up" + } + ], + "images": [ + "images/task_0006_user_0013_scene_0009_cfg_0002_60_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_70_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_80_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_90_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0006", + "split": "eval", + "split_group_id": "task_0006_user_0013_scene_0009_cfg_0002", + "recording_id": "task_0006_user_0013_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Up" + } + ], + "images": [ + "images/task_0006_user_0013_scene_0009_cfg_0002_70_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_80_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_90_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_100_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0006", + "split": "eval", + "split_group_id": "task_0006_user_0013_scene_0009_cfg_0002", + "recording_id": "task_0006_user_0013_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Right\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Up" + } + ], + "images": [ + "images/task_0006_user_0013_scene_0009_cfg_0002_140_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_150_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_160_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_170_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0006", + "split": "eval", + "split_group_id": "task_0006_user_0013_scene_0009_cfg_0002", + "recording_id": "task_0006_user_0013_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Down\nC. Left\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Down" + } + ], + "images": [ + "images/task_0006_user_0013_scene_0009_cfg_0002_210_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_220_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_230_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_240_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0006", + "split": "eval", + "split_group_id": "task_0006_user_0013_scene_0009_cfg_0002", + "recording_id": "task_0006_user_0013_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Up\nC. Left\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Left" + } + ], + "images": [ + "images/task_0006_user_0013_scene_0009_cfg_0002_240_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_250_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_260_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_270_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0006", + "split": "eval", + "split_group_id": "task_0006_user_0013_scene_0009_cfg_0002", + "recording_id": "task_0006_user_0013_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Left\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Left" + } + ], + "images": [ + "images/task_0006_user_0013_scene_0009_cfg_0002_250_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_260_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_270_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_280_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0006", + "split": "eval", + "split_group_id": "task_0006_user_0013_scene_0009_cfg_0002", + "recording_id": "task_0006_user_0013_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Left\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Left" + } + ], + "images": [ + "images/task_0006_user_0013_scene_0009_cfg_0002_260_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_270_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_280_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_290_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0006", + "split": "eval", + "split_group_id": "task_0006_user_0013_scene_0009_cfg_0002", + "recording_id": "task_0006_user_0013_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0006_user_0013_scene_0009_cfg_0002_241_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_244_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_247_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_250_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_253_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0006", + "split": "eval", + "split_group_id": "task_0006_user_0013_scene_0009_cfg_0002", + "recording_id": "task_0006_user_0013_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. 
Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0006_user_0013_scene_0009_cfg_0002_263_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_266_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_269_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_272_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_275_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0006", + "split": "eval", + "split_group_id": "task_0006_user_0013_scene_0009_cfg_0002", + "recording_id": "task_0006_user_0013_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0006_user_0013_scene_0009_cfg_0002_304_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_307_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_310_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_313_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_316_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0006", + "split": "eval", + "split_group_id": "task_0006_user_0013_scene_0009_cfg_0002", + "recording_id": "task_0006_user_0013_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0006_user_0013_scene_0009_cfg_0002_322_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_325_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_328_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_331_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_334_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0006", + "split": "eval", + "split_group_id": "task_0006_user_0013_scene_0009_cfg_0002", + "recording_id": "task_0006_user_0013_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0006_user_0013_scene_0009_cfg_0002_7_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_18_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_29_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_40_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0006", + "split": "eval", + "split_group_id": "task_0006_user_0013_scene_0009_cfg_0002", + "recording_id": "task_0006_user_0013_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. 
No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0006_user_0013_scene_0009_cfg_0002_29_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_40_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_51_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_62_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0006", + "split": "eval", + "split_group_id": "task_0006_user_0013_scene_0009_cfg_0002", + "recording_id": "task_0006_user_0013_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0006_user_0013_scene_0009_cfg_0002_51_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_62_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_73_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_84_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0006", + "split": "eval", + "split_group_id": "task_0006_user_0013_scene_0009_cfg_0002", + "recording_id": "task_0006_user_0013_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. 
Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0006_user_0013_scene_0009_cfg_0002_62_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_73_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_84_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_95_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0006", + "split": "eval", + "split_group_id": "task_0006_user_0013_scene_0009_cfg_0002", + "recording_id": "task_0006_user_0013_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0006_user_0013_scene_0009_cfg_0002_84_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_95_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_106_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_117_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0006", + "split": "eval", + "split_group_id": "task_0006_user_0013_scene_0009_cfg_0002", + "recording_id": "task_0006_user_0013_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. 
No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0006_user_0013_scene_0009_cfg_0002_106_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_117_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_128_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_139_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0006", + "split": "eval", + "split_group_id": "task_0006_user_0013_scene_0009_cfg_0002", + "recording_id": "task_0006_user_0013_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0006_user_0013_scene_0009_cfg_0002_329_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_290_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0006", + "split": "eval", + "split_group_id": "task_0006_user_0013_scene_0009_cfg_0002", + "recording_id": "task_0006_user_0013_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0006_user_0013_scene_0009_cfg_0002_288_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_291_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_294_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_297_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_300_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0006", + "split": "eval", + "split_group_id": "task_0006_user_0013_scene_0009_cfg_0002", + "recording_id": "task_0006_user_0013_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0006_user_0013_scene_0009_cfg_0002_304_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_307_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_310_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_313_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_316_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0006", + "split": "eval", + "split_group_id": "task_0006_user_0013_scene_0009_cfg_0002", + "recording_id": "task_0006_user_0013_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0006_user_0013_scene_0009_cfg_0002_319_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_322_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_325_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_328_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_331_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0006", + "split": "eval", + "split_group_id": "task_0006_user_0013_scene_0009_cfg_0002", + "recording_id": "task_0006_user_0013_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_0006_user_0013_scene_0009_cfg_0002_330_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_289_036422060215.jpg", + "images/task_0006_user_0013_scene_0009_cfg_0002_310_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0006", + "split": "eval", + "split_group_id": "task_0006_user_0013_scene_0009_cfg_0002", + "recording_id": "task_0006_user_0013_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0007_user_0007_scene_0009_cfg_0002_9_036422060215.jpg", + "images/task_0007_user_0007_scene_0009_cfg_0002_23_036422060215.jpg", + "images/task_0007_user_0007_scene_0009_cfg_0002_37_036422060215.jpg", + "images/task_0007_user_0007_scene_0009_cfg_0002_51_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0007", + "split": "eval", + "split_group_id": "task_0007_user_0007_scene_0009_cfg_0002", + "recording_id": "task_0007_user_0007_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0007_user_0007_scene_0009_cfg_0002_23_036422060215.jpg", + "images/task_0007_user_0007_scene_0009_cfg_0002_37_036422060215.jpg", + "images/task_0007_user_0007_scene_0009_cfg_0002_51_036422060215.jpg", + "images/task_0007_user_0007_scene_0009_cfg_0002_65_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0007", + "split": "eval", + "split_group_id": "task_0007_user_0007_scene_0009_cfg_0002", + "recording_id": "task_0007_user_0007_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0007_user_0007_scene_0009_cfg_0002_37_036422060215.jpg", + "images/task_0007_user_0007_scene_0009_cfg_0002_51_036422060215.jpg", + "images/task_0007_user_0007_scene_0009_cfg_0002_65_036422060215.jpg", + "images/task_0007_user_0007_scene_0009_cfg_0002_79_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0007", + "split": "eval", + "split_group_id": "task_0007_user_0007_scene_0009_cfg_0002", + "recording_id": "task_0007_user_0007_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0007_user_0007_scene_0009_cfg_0002_51_036422060215.jpg", + "images/task_0007_user_0007_scene_0009_cfg_0002_65_036422060215.jpg", + "images/task_0007_user_0007_scene_0009_cfg_0002_79_036422060215.jpg", + "images/task_0007_user_0007_scene_0009_cfg_0002_93_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0007", + "split": "eval", + "split_group_id": "task_0007_user_0007_scene_0009_cfg_0002", + "recording_id": "task_0007_user_0007_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0007_user_0007_scene_0009_cfg_0002_65_036422060215.jpg", + "images/task_0007_user_0007_scene_0009_cfg_0002_79_036422060215.jpg", + "images/task_0007_user_0007_scene_0009_cfg_0002_93_036422060215.jpg", + "images/task_0007_user_0007_scene_0009_cfg_0002_107_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0007", + "split": "eval", + "split_group_id": "task_0007_user_0007_scene_0009_cfg_0002", + "recording_id": "task_0007_user_0007_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0007_user_0007_scene_0009_cfg_0002_93_036422060215.jpg", + "images/task_0007_user_0007_scene_0009_cfg_0002_107_036422060215.jpg", + "images/task_0007_user_0007_scene_0009_cfg_0002_121_036422060215.jpg", + "images/task_0007_user_0007_scene_0009_cfg_0002_135_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0007", + "split": "eval", + "split_group_id": "task_0007_user_0007_scene_0009_cfg_0002", + "recording_id": "task_0007_user_0007_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0007_user_0007_scene_0009_cfg_0002_107_036422060215.jpg", + "images/task_0007_user_0007_scene_0009_cfg_0002_121_036422060215.jpg", + "images/task_0007_user_0007_scene_0009_cfg_0002_135_036422060215.jpg", + "images/task_0007_user_0007_scene_0009_cfg_0002_149_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0007", + "split": "eval", + "split_group_id": "task_0007_user_0007_scene_0009_cfg_0002", + "recording_id": "task_0007_user_0007_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0007_user_0007_scene_0009_cfg_0002_121_036422060215.jpg", + "images/task_0007_user_0007_scene_0009_cfg_0002_135_036422060215.jpg", + "images/task_0007_user_0007_scene_0009_cfg_0002_149_036422060215.jpg", + "images/task_0007_user_0007_scene_0009_cfg_0002_163_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0007", + "split": "eval", + "split_group_id": "task_0007_user_0007_scene_0009_cfg_0002", + "recording_id": "task_0007_user_0007_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0007_user_0007_scene_0009_cfg_0002_135_036422060215.jpg", + "images/task_0007_user_0007_scene_0009_cfg_0002_149_036422060215.jpg", + "images/task_0007_user_0007_scene_0009_cfg_0002_163_036422060215.jpg", + "images/task_0007_user_0007_scene_0009_cfg_0002_177_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0007", + "split": "eval", + "split_group_id": "task_0007_user_0007_scene_0009_cfg_0002", + "recording_id": "task_0007_user_0007_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0007_user_0007_scene_0009_cfg_0002_149_036422060215.jpg", + "images/task_0007_user_0007_scene_0009_cfg_0002_163_036422060215.jpg", + "images/task_0007_user_0007_scene_0009_cfg_0002_177_036422060215.jpg", + "images/task_0007_user_0007_scene_0009_cfg_0002_191_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0007", + "split": "eval", + "split_group_id": "task_0007_user_0007_scene_0009_cfg_0002", + "recording_id": "task_0007_user_0007_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0008_user_0007_scene_0002_cfg_0002_8_036422060215.jpg", + "images/task_0008_user_0007_scene_0002_cfg_0002_20_036422060215.jpg", + "images/task_0008_user_0007_scene_0002_cfg_0002_32_036422060215.jpg", + "images/task_0008_user_0007_scene_0002_cfg_0002_44_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0008", + "split": "eval", + "split_group_id": "task_0008_user_0007_scene_0002_cfg_0002", + "recording_id": "task_0008_user_0007_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0008_user_0007_scene_0002_cfg_0002_20_036422060215.jpg", + "images/task_0008_user_0007_scene_0002_cfg_0002_32_036422060215.jpg", + "images/task_0008_user_0007_scene_0002_cfg_0002_44_036422060215.jpg", + "images/task_0008_user_0007_scene_0002_cfg_0002_56_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0008", + "split": "eval", + "split_group_id": "task_0008_user_0007_scene_0002_cfg_0002", + "recording_id": "task_0008_user_0007_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0008_user_0007_scene_0002_cfg_0002_32_036422060215.jpg", + "images/task_0008_user_0007_scene_0002_cfg_0002_44_036422060215.jpg", + "images/task_0008_user_0007_scene_0002_cfg_0002_56_036422060215.jpg", + "images/task_0008_user_0007_scene_0002_cfg_0002_68_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0008", + "split": "eval", + "split_group_id": "task_0008_user_0007_scene_0002_cfg_0002", + "recording_id": "task_0008_user_0007_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0008_user_0007_scene_0002_cfg_0002_44_036422060215.jpg", + "images/task_0008_user_0007_scene_0002_cfg_0002_56_036422060215.jpg", + "images/task_0008_user_0007_scene_0002_cfg_0002_68_036422060215.jpg", + "images/task_0008_user_0007_scene_0002_cfg_0002_80_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0008", + "split": "eval", + "split_group_id": "task_0008_user_0007_scene_0002_cfg_0002", + "recording_id": "task_0008_user_0007_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0008_user_0007_scene_0002_cfg_0002_56_036422060215.jpg", + "images/task_0008_user_0007_scene_0002_cfg_0002_68_036422060215.jpg", + "images/task_0008_user_0007_scene_0002_cfg_0002_80_036422060215.jpg", + "images/task_0008_user_0007_scene_0002_cfg_0002_92_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0008", + "split": "eval", + "split_group_id": "task_0008_user_0007_scene_0002_cfg_0002", + "recording_id": "task_0008_user_0007_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0008_user_0007_scene_0002_cfg_0002_80_036422060215.jpg", + "images/task_0008_user_0007_scene_0002_cfg_0002_92_036422060215.jpg", + "images/task_0008_user_0007_scene_0002_cfg_0002_104_036422060215.jpg", + "images/task_0008_user_0007_scene_0002_cfg_0002_116_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0008", + "split": "eval", + "split_group_id": "task_0008_user_0007_scene_0002_cfg_0002", + "recording_id": "task_0008_user_0007_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0008_user_0007_scene_0002_cfg_0002_92_036422060215.jpg", + "images/task_0008_user_0007_scene_0002_cfg_0002_104_036422060215.jpg", + "images/task_0008_user_0007_scene_0002_cfg_0002_116_036422060215.jpg", + "images/task_0008_user_0007_scene_0002_cfg_0002_128_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0008", + "split": "eval", + "split_group_id": "task_0008_user_0007_scene_0002_cfg_0002", + "recording_id": "task_0008_user_0007_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0008_user_0007_scene_0002_cfg_0002_104_036422060215.jpg", + "images/task_0008_user_0007_scene_0002_cfg_0002_116_036422060215.jpg", + "images/task_0008_user_0007_scene_0002_cfg_0002_128_036422060215.jpg", + "images/task_0008_user_0007_scene_0002_cfg_0002_140_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0008", + "split": "eval", + "split_group_id": "task_0008_user_0007_scene_0002_cfg_0002", + "recording_id": "task_0008_user_0007_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0008_user_0007_scene_0002_cfg_0002_116_036422060215.jpg", + "images/task_0008_user_0007_scene_0002_cfg_0002_128_036422060215.jpg", + "images/task_0008_user_0007_scene_0002_cfg_0002_140_036422060215.jpg", + "images/task_0008_user_0007_scene_0002_cfg_0002_152_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0008", + "split": "eval", + "split_group_id": "task_0008_user_0007_scene_0002_cfg_0002", + "recording_id": "task_0008_user_0007_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0008_user_0007_scene_0002_cfg_0002_128_036422060215.jpg", + "images/task_0008_user_0007_scene_0002_cfg_0002_140_036422060215.jpg", + "images/task_0008_user_0007_scene_0002_cfg_0002_152_036422060215.jpg", + "images/task_0008_user_0007_scene_0002_cfg_0002_164_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0008", + "split": "eval", + "split_group_id": "task_0008_user_0007_scene_0002_cfg_0002", + "recording_id": "task_0008_user_0007_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Right\nC. Down\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Left" + } + ], + "images": [ + "images/task_0010_user_0007_scene_0006_cfg_0002_40_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_50_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_60_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_70_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0010", + "split": "eval", + "split_group_id": "task_0010_user_0007_scene_0006_cfg_0002", + "recording_id": "task_0010_user_0007_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Down\nC. Left\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Up" + } + ], + "images": [ + "images/task_0010_user_0007_scene_0006_cfg_0002_70_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_80_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_90_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_100_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0010", + "split": "eval", + "split_group_id": "task_0010_user_0007_scene_0006_cfg_0002", + "recording_id": "task_0010_user_0007_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Right\nC. Left\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Up" + } + ], + "images": [ + "images/task_0010_user_0007_scene_0006_cfg_0002_100_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_110_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_120_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_130_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0010", + "split": "eval", + "split_group_id": "task_0010_user_0007_scene_0006_cfg_0002", + "recording_id": "task_0010_user_0007_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Right\nC. Down\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Up" + } + ], + "images": [ + "images/task_0010_user_0007_scene_0006_cfg_0002_110_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_120_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_130_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_140_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0010", + "split": "eval", + "split_group_id": "task_0010_user_0007_scene_0006_cfg_0002", + "recording_id": "task_0010_user_0007_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Right\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Down" + } + ], + "images": [ + "images/task_0010_user_0007_scene_0006_cfg_0002_240_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_250_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_260_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_270_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0010", + "split": "eval", + "split_group_id": "task_0010_user_0007_scene_0006_cfg_0002", + "recording_id": "task_0010_user_0007_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Down" + } + ], + "images": [ + "images/task_0010_user_0007_scene_0006_cfg_0002_250_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_260_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_270_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_280_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0010", + "split": "eval", + "split_group_id": "task_0010_user_0007_scene_0006_cfg_0002", + "recording_id": "task_0010_user_0007_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Down\nC. Up\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Down" + } + ], + "images": [ + "images/task_0010_user_0007_scene_0006_cfg_0002_260_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_270_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_280_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_290_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0010", + "split": "eval", + "split_group_id": "task_0010_user_0007_scene_0006_cfg_0002", + "recording_id": "task_0010_user_0007_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Left\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Right" + } + ], + "images": [ + "images/task_0010_user_0007_scene_0006_cfg_0002_380_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_390_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_400_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_410_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0010", + "split": "eval", + "split_group_id": "task_0010_user_0007_scene_0006_cfg_0002", + "recording_id": "task_0010_user_0007_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Right\nC. Left\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Right" + } + ], + "images": [ + "images/task_0010_user_0007_scene_0006_cfg_0002_390_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_400_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_410_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_420_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0010", + "split": "eval", + "split_group_id": "task_0010_user_0007_scene_0006_cfg_0002", + "recording_id": "task_0010_user_0007_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Left\nC. Down\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Right" + } + ], + "images": [ + "images/task_0010_user_0007_scene_0006_cfg_0002_400_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_410_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_420_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_430_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0010", + "split": "eval", + "split_group_id": "task_0010_user_0007_scene_0006_cfg_0002", + "recording_id": "task_0010_user_0007_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0010_user_0007_scene_0006_cfg_0002_98_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_101_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_104_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_107_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_110_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0010", + "split": "eval", + "split_group_id": "task_0010_user_0007_scene_0006_cfg_0002", + "recording_id": "task_0010_user_0007_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0010_user_0007_scene_0006_cfg_0002_117_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_120_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_123_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_126_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_129_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0010", + "split": "eval", + "split_group_id": "task_0010_user_0007_scene_0006_cfg_0002", + "recording_id": "task_0010_user_0007_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0010_user_0007_scene_0006_cfg_0002_261_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_264_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_267_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_270_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_273_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0010", + "split": "eval", + "split_group_id": "task_0010_user_0007_scene_0006_cfg_0002", + "recording_id": "task_0010_user_0007_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0010_user_0007_scene_0006_cfg_0002_314_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_317_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_320_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_323_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_326_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0010", + "split": "eval", + "split_group_id": "task_0010_user_0007_scene_0006_cfg_0002", + "recording_id": "task_0010_user_0007_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0010_user_0007_scene_0006_cfg_0002_372_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_375_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_378_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_381_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_384_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0010", + "split": "eval", + "split_group_id": "task_0010_user_0007_scene_0006_cfg_0002", + "recording_id": "task_0010_user_0007_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0010_user_0007_scene_0006_cfg_0002_398_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_401_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_404_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_407_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_410_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0010", + "split": "eval", + "split_group_id": "task_0010_user_0007_scene_0006_cfg_0002", + "recording_id": "task_0010_user_0007_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0010_user_0007_scene_0006_cfg_0002_9_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_22_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_35_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_48_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0010", + "split": "eval", + "split_group_id": "task_0010_user_0007_scene_0006_cfg_0002", + "recording_id": "task_0010_user_0007_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. 
No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0010_user_0007_scene_0006_cfg_0002_35_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_48_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_61_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_74_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0010", + "split": "eval", + "split_group_id": "task_0010_user_0007_scene_0006_cfg_0002", + "recording_id": "task_0010_user_0007_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0010_user_0007_scene_0006_cfg_0002_61_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_74_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_87_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_100_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0010", + "split": "eval", + "split_group_id": "task_0010_user_0007_scene_0006_cfg_0002", + "recording_id": "task_0010_user_0007_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. 
No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0010_user_0007_scene_0006_cfg_0002_87_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_100_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_113_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_126_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0010", + "split": "eval", + "split_group_id": "task_0010_user_0007_scene_0006_cfg_0002", + "recording_id": "task_0010_user_0007_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0010_user_0007_scene_0006_cfg_0002_113_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_126_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_139_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_152_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0010", + "split": "eval", + "split_group_id": "task_0010_user_0007_scene_0006_cfg_0002", + "recording_id": "task_0010_user_0007_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. 
Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0010_user_0007_scene_0006_cfg_0002_139_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_152_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_165_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_178_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0010", + "split": "eval", + "split_group_id": "task_0010_user_0007_scene_0006_cfg_0002", + "recording_id": "task_0010_user_0007_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0010_user_0007_scene_0006_cfg_0002_130_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_140_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0010", + "split": "eval", + "split_group_id": "task_0010_user_0007_scene_0006_cfg_0002", + "recording_id": "task_0010_user_0007_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0010_user_0007_scene_0006_cfg_0002_268_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_365_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0010", + "split": "eval", + "split_group_id": "task_0010_user_0007_scene_0006_cfg_0002", + "recording_id": "task_0010_user_0007_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0010_user_0007_scene_0006_cfg_0002_268_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_140_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0010", + "split": "eval", + "split_group_id": "task_0010_user_0007_scene_0006_cfg_0002", + "recording_id": "task_0010_user_0007_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0010_user_0007_scene_0006_cfg_0002_125_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_128_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_131_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_134_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_137_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0010", + "split": "eval", + "split_group_id": "task_0010_user_0007_scene_0006_cfg_0002", + "recording_id": "task_0010_user_0007_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0010_user_0007_scene_0006_cfg_0002_129_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_132_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_135_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_138_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_141_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0010", + "split": "eval", + "split_group_id": "task_0010_user_0007_scene_0006_cfg_0002", + "recording_id": "task_0010_user_0007_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0010_user_0007_scene_0006_cfg_0002_133_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_136_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_139_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_142_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_145_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0010", + "split": "eval", + "split_group_id": "task_0010_user_0007_scene_0006_cfg_0002", + "recording_id": "task_0010_user_0007_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0010_user_0007_scene_0006_cfg_0002_272_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_275_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_278_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_281_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_284_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0010", + "split": "eval", + "split_group_id": "task_0010_user_0007_scene_0006_cfg_0002", + "recording_id": "task_0010_user_0007_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0010_user_0007_scene_0006_cfg_0002_310_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_313_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_316_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_319_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_322_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0010", + "split": "eval", + "split_group_id": "task_0010_user_0007_scene_0006_cfg_0002", + "recording_id": "task_0010_user_0007_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0010_user_0007_scene_0006_cfg_0002_349_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_352_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_355_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_358_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_361_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0010", + "split": "eval", + "split_group_id": "task_0010_user_0007_scene_0006_cfg_0002", + "recording_id": "task_0010_user_0007_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_0010_user_0007_scene_0006_cfg_0002_316_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_368_036422060215.jpg", + "images/task_0010_user_0007_scene_0006_cfg_0002_265_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0010", + "split": "eval", + "split_group_id": "task_0010_user_0007_scene_0006_cfg_0002", + "recording_id": "task_0010_user_0007_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0011_user_0013_scene_0003_cfg_0002_32_036422060215.jpg", + "images/task_0011_user_0013_scene_0003_cfg_0002_62_036422060215.jpg", + "images/task_0011_user_0013_scene_0003_cfg_0002_92_036422060215.jpg", + "images/task_0011_user_0013_scene_0003_cfg_0002_122_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0011", + "split": "eval", + "split_group_id": "task_0011_user_0013_scene_0003_cfg_0002", + "recording_id": "task_0011_user_0013_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0011_user_0013_scene_0003_cfg_0002_80_036422060215.jpg", + "images/task_0011_user_0013_scene_0003_cfg_0002_110_036422060215.jpg", + "images/task_0011_user_0013_scene_0003_cfg_0002_140_036422060215.jpg", + "images/task_0011_user_0013_scene_0003_cfg_0002_170_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0011", + "split": "eval", + "split_group_id": "task_0011_user_0013_scene_0003_cfg_0002", + "recording_id": "task_0011_user_0013_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0011_user_0013_scene_0003_cfg_0002_128_036422060215.jpg", + "images/task_0011_user_0013_scene_0003_cfg_0002_158_036422060215.jpg", + "images/task_0011_user_0013_scene_0003_cfg_0002_188_036422060215.jpg", + "images/task_0011_user_0013_scene_0003_cfg_0002_218_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0011", + "split": "eval", + "split_group_id": "task_0011_user_0013_scene_0003_cfg_0002", + "recording_id": "task_0011_user_0013_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0011_user_0013_scene_0003_cfg_0002_224_036422060215.jpg", + "images/task_0011_user_0013_scene_0003_cfg_0002_254_036422060215.jpg", + "images/task_0011_user_0013_scene_0003_cfg_0002_284_036422060215.jpg", + "images/task_0011_user_0013_scene_0003_cfg_0002_314_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0011", + "split": "eval", + "split_group_id": "task_0011_user_0013_scene_0003_cfg_0002", + "recording_id": "task_0011_user_0013_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0011_user_0013_scene_0003_cfg_0002_272_036422060215.jpg", + "images/task_0011_user_0013_scene_0003_cfg_0002_302_036422060215.jpg", + "images/task_0011_user_0013_scene_0003_cfg_0002_332_036422060215.jpg", + "images/task_0011_user_0013_scene_0003_cfg_0002_362_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0011", + "split": "eval", + "split_group_id": "task_0011_user_0013_scene_0003_cfg_0002", + "recording_id": "task_0011_user_0013_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0011_user_0013_scene_0003_cfg_0002_320_036422060215.jpg", + "images/task_0011_user_0013_scene_0003_cfg_0002_350_036422060215.jpg", + "images/task_0011_user_0013_scene_0003_cfg_0002_380_036422060215.jpg", + "images/task_0011_user_0013_scene_0003_cfg_0002_410_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0011", + "split": "eval", + "split_group_id": "task_0011_user_0013_scene_0003_cfg_0002", + "recording_id": "task_0011_user_0013_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0011_user_0013_scene_0003_cfg_0002_368_036422060215.jpg", + "images/task_0011_user_0013_scene_0003_cfg_0002_398_036422060215.jpg", + "images/task_0011_user_0013_scene_0003_cfg_0002_428_036422060215.jpg", + "images/task_0011_user_0013_scene_0003_cfg_0002_458_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0011", + "split": "eval", + "split_group_id": "task_0011_user_0013_scene_0003_cfg_0002", + "recording_id": "task_0011_user_0013_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0011_user_0013_scene_0003_cfg_0002_464_036422060215.jpg", + "images/task_0011_user_0013_scene_0003_cfg_0002_494_036422060215.jpg", + "images/task_0011_user_0013_scene_0003_cfg_0002_524_036422060215.jpg", + "images/task_0011_user_0013_scene_0003_cfg_0002_554_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0011", + "split": "eval", + "split_group_id": "task_0011_user_0013_scene_0003_cfg_0002", + "recording_id": "task_0011_user_0013_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0011_user_0013_scene_0003_cfg_0002_512_036422060215.jpg", + "images/task_0011_user_0013_scene_0003_cfg_0002_542_036422060215.jpg", + "images/task_0011_user_0013_scene_0003_cfg_0002_572_036422060215.jpg", + "images/task_0011_user_0013_scene_0003_cfg_0002_602_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0011", + "split": "eval", + "split_group_id": "task_0011_user_0013_scene_0003_cfg_0002", + "recording_id": "task_0011_user_0013_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0011_user_0013_scene_0003_cfg_0002_560_036422060215.jpg", + "images/task_0011_user_0013_scene_0003_cfg_0002_590_036422060215.jpg", + "images/task_0011_user_0013_scene_0003_cfg_0002_620_036422060215.jpg", + "images/task_0011_user_0013_scene_0003_cfg_0002_650_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0011", + "split": "eval", + "split_group_id": "task_0011_user_0013_scene_0003_cfg_0002", + "recording_id": "task_0011_user_0013_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Right\nC. Left\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Left" + } + ], + "images": [ + "images/task_0014_user_0006_scene_0007_cfg_0002_50_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_60_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_70_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_80_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0014", + "split": "eval", + "split_group_id": "task_0014_user_0006_scene_0007_cfg_0002", + "recording_id": "task_0014_user_0006_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Left\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Left" + } + ], + "images": [ + "images/task_0014_user_0006_scene_0007_cfg_0002_60_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_70_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_80_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_90_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0014", + "split": "eval", + "split_group_id": "task_0014_user_0006_scene_0007_cfg_0002", + "recording_id": "task_0014_user_0006_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Left\nC. Up\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Left" + } + ], + "images": [ + "images/task_0014_user_0006_scene_0007_cfg_0002_70_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_80_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_90_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_100_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0014", + "split": "eval", + "split_group_id": "task_0014_user_0006_scene_0007_cfg_0002", + "recording_id": "task_0014_user_0006_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Up\nC. Left\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Up" + } + ], + "images": [ + "images/task_0014_user_0006_scene_0007_cfg_0002_160_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_170_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_180_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_190_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0014", + "split": "eval", + "split_group_id": "task_0014_user_0006_scene_0007_cfg_0002", + "recording_id": "task_0014_user_0006_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Right\nC. Left\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Right" + } + ], + "images": [ + "images/task_0014_user_0006_scene_0007_cfg_0002_180_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_190_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_200_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_210_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0014", + "split": "eval", + "split_group_id": "task_0014_user_0006_scene_0007_cfg_0002", + "recording_id": "task_0014_user_0006_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Down\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Right" + } + ], + "images": [ + "images/task_0014_user_0006_scene_0007_cfg_0002_190_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_200_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_210_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_220_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0014", + "split": "eval", + "split_group_id": "task_0014_user_0006_scene_0007_cfg_0002", + "recording_id": "task_0014_user_0006_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Down\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Right" + } + ], + "images": [ + "images/task_0014_user_0006_scene_0007_cfg_0002_200_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_210_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_220_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_230_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0014", + "split": "eval", + "split_group_id": "task_0014_user_0006_scene_0007_cfg_0002", + "recording_id": "task_0014_user_0006_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Down\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Down" + } + ], + "images": [ + "images/task_0014_user_0006_scene_0007_cfg_0002_260_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_270_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_280_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_290_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0014", + "split": "eval", + "split_group_id": "task_0014_user_0006_scene_0007_cfg_0002", + "recording_id": "task_0014_user_0006_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Right\nC. Left\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Down" + } + ], + "images": [ + "images/task_0014_user_0006_scene_0007_cfg_0002_270_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_280_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_290_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_300_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0014", + "split": "eval", + "split_group_id": "task_0014_user_0006_scene_0007_cfg_0002", + "recording_id": "task_0014_user_0006_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Right\nC. Left\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Down" + } + ], + "images": [ + "images/task_0014_user_0006_scene_0007_cfg_0002_280_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_290_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_300_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_310_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0014", + "split": "eval", + "split_group_id": "task_0014_user_0006_scene_0007_cfg_0002", + "recording_id": "task_0014_user_0006_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0014_user_0006_scene_0007_cfg_0002_177_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_180_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_183_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_186_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_189_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0014", + "split": "eval", + "split_group_id": "task_0014_user_0006_scene_0007_cfg_0002", + "recording_id": "task_0014_user_0006_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0014_user_0006_scene_0007_cfg_0002_254_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_257_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_260_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_263_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_266_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0014", + "split": "eval", + "split_group_id": "task_0014_user_0006_scene_0007_cfg_0002", + "recording_id": "task_0014_user_0006_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0014_user_0006_scene_0007_cfg_0002_286_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_289_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_292_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_295_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_298_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0014", + "split": "eval", + "split_group_id": "task_0014_user_0006_scene_0007_cfg_0002", + "recording_id": "task_0014_user_0006_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0014_user_0006_scene_0007_cfg_0002_7_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_17_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_27_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_37_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0014", + "split": "eval", + "split_group_id": "task_0014_user_0006_scene_0007_cfg_0002", + "recording_id": "task_0014_user_0006_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0014_user_0006_scene_0007_cfg_0002_27_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_37_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_47_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_57_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0014", + "split": "eval", + "split_group_id": "task_0014_user_0006_scene_0007_cfg_0002", + "recording_id": "task_0014_user_0006_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0014_user_0006_scene_0007_cfg_0002_47_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_57_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_67_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_77_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0014", + "split": "eval", + "split_group_id": "task_0014_user_0006_scene_0007_cfg_0002", + "recording_id": "task_0014_user_0006_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0014_user_0006_scene_0007_cfg_0002_67_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_77_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_87_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_97_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0014", + "split": "eval", + "split_group_id": "task_0014_user_0006_scene_0007_cfg_0002", + "recording_id": "task_0014_user_0006_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0014_user_0006_scene_0007_cfg_0002_87_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_97_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_107_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_117_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0014", + "split": "eval", + "split_group_id": "task_0014_user_0006_scene_0007_cfg_0002", + "recording_id": "task_0014_user_0006_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0014_user_0006_scene_0007_cfg_0002_107_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_117_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_127_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_137_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0014", + "split": "eval", + "split_group_id": "task_0014_user_0006_scene_0007_cfg_0002", + "recording_id": "task_0014_user_0006_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0014_user_0006_scene_0007_cfg_0002_265_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_205_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0014", + "split": "eval", + "split_group_id": "task_0014_user_0006_scene_0007_cfg_0002", + "recording_id": "task_0014_user_0006_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0014_user_0006_scene_0007_cfg_0002_205_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_208_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_211_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_214_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_217_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0014", + "split": "eval", + "split_group_id": "task_0014_user_0006_scene_0007_cfg_0002", + "recording_id": "task_0014_user_0006_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0014_user_0006_scene_0007_cfg_0002_229_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_232_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_235_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_238_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_241_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0014", + "split": "eval", + "split_group_id": "task_0014_user_0006_scene_0007_cfg_0002", + "recording_id": "task_0014_user_0006_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0014_user_0006_scene_0007_cfg_0002_253_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_256_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_259_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_262_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_265_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0014", + "split": "eval", + "split_group_id": "task_0014_user_0006_scene_0007_cfg_0002", + "recording_id": "task_0014_user_0006_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_0014_user_0006_scene_0007_cfg_0002_203_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_235_036422060215.jpg", + "images/task_0014_user_0006_scene_0007_cfg_0002_267_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0014", + "split": "eval", + "split_group_id": "task_0014_user_0006_scene_0007_cfg_0002", + "recording_id": "task_0014_user_0006_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0024_user_0007_scene_0007_cfg_0002_2_036422060215.jpg", + "images/task_0024_user_0007_scene_0007_cfg_0002_7_036422060215.jpg", + "images/task_0024_user_0007_scene_0007_cfg_0002_12_036422060215.jpg", + "images/task_0024_user_0007_scene_0007_cfg_0002_17_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0024", + "split": "eval", + "split_group_id": "task_0024_user_0007_scene_0007_cfg_0002", + "recording_id": "task_0024_user_0007_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0024_user_0007_scene_0007_cfg_0002_8_036422060215.jpg", + "images/task_0024_user_0007_scene_0007_cfg_0002_13_036422060215.jpg", + "images/task_0024_user_0007_scene_0007_cfg_0002_18_036422060215.jpg", + "images/task_0024_user_0007_scene_0007_cfg_0002_23_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0024", + "split": "eval", + "split_group_id": "task_0024_user_0007_scene_0007_cfg_0002", + "recording_id": "task_0024_user_0007_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0024_user_0007_scene_0007_cfg_0002_14_036422060215.jpg", + "images/task_0024_user_0007_scene_0007_cfg_0002_19_036422060215.jpg", + "images/task_0024_user_0007_scene_0007_cfg_0002_24_036422060215.jpg", + "images/task_0024_user_0007_scene_0007_cfg_0002_29_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0024", + "split": "eval", + "split_group_id": "task_0024_user_0007_scene_0007_cfg_0002", + "recording_id": "task_0024_user_0007_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0024_user_0007_scene_0007_cfg_0002_20_036422060215.jpg", + "images/task_0024_user_0007_scene_0007_cfg_0002_25_036422060215.jpg", + "images/task_0024_user_0007_scene_0007_cfg_0002_30_036422060215.jpg", + "images/task_0024_user_0007_scene_0007_cfg_0002_35_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0024", + "split": "eval", + "split_group_id": "task_0024_user_0007_scene_0007_cfg_0002", + "recording_id": "task_0024_user_0007_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0024_user_0007_scene_0007_cfg_0002_26_036422060215.jpg", + "images/task_0024_user_0007_scene_0007_cfg_0002_31_036422060215.jpg", + "images/task_0024_user_0007_scene_0007_cfg_0002_36_036422060215.jpg", + "images/task_0024_user_0007_scene_0007_cfg_0002_41_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0024", + "split": "eval", + "split_group_id": "task_0024_user_0007_scene_0007_cfg_0002", + "recording_id": "task_0024_user_0007_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0043_user_0007_scene_0001_cfg_0002_11_036422060215.jpg", + "images/task_0043_user_0007_scene_0001_cfg_0002_27_036422060215.jpg", + "images/task_0043_user_0007_scene_0001_cfg_0002_43_036422060215.jpg", + "images/task_0043_user_0007_scene_0001_cfg_0002_59_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0043", + "split": "eval", + "split_group_id": "task_0043_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0043_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0043_user_0007_scene_0001_cfg_0002_27_036422060215.jpg", + "images/task_0043_user_0007_scene_0001_cfg_0002_43_036422060215.jpg", + "images/task_0043_user_0007_scene_0001_cfg_0002_59_036422060215.jpg", + "images/task_0043_user_0007_scene_0001_cfg_0002_75_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0043", + "split": "eval", + "split_group_id": "task_0043_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0043_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0043_user_0007_scene_0001_cfg_0002_43_036422060215.jpg", + "images/task_0043_user_0007_scene_0001_cfg_0002_59_036422060215.jpg", + "images/task_0043_user_0007_scene_0001_cfg_0002_75_036422060215.jpg", + "images/task_0043_user_0007_scene_0001_cfg_0002_91_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0043", + "split": "eval", + "split_group_id": "task_0043_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0043_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0043_user_0007_scene_0001_cfg_0002_59_036422060215.jpg", + "images/task_0043_user_0007_scene_0001_cfg_0002_75_036422060215.jpg", + "images/task_0043_user_0007_scene_0001_cfg_0002_91_036422060215.jpg", + "images/task_0043_user_0007_scene_0001_cfg_0002_107_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0043", + "split": "eval", + "split_group_id": "task_0043_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0043_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0043_user_0007_scene_0001_cfg_0002_75_036422060215.jpg", + "images/task_0043_user_0007_scene_0001_cfg_0002_91_036422060215.jpg", + "images/task_0043_user_0007_scene_0001_cfg_0002_107_036422060215.jpg", + "images/task_0043_user_0007_scene_0001_cfg_0002_123_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0043", + "split": "eval", + "split_group_id": "task_0043_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0043_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0043_user_0007_scene_0001_cfg_0002_107_036422060215.jpg", + "images/task_0043_user_0007_scene_0001_cfg_0002_123_036422060215.jpg", + "images/task_0043_user_0007_scene_0001_cfg_0002_139_036422060215.jpg", + "images/task_0043_user_0007_scene_0001_cfg_0002_155_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0043", + "split": "eval", + "split_group_id": "task_0043_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0043_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0043_user_0007_scene_0001_cfg_0002_123_036422060215.jpg", + "images/task_0043_user_0007_scene_0001_cfg_0002_139_036422060215.jpg", + "images/task_0043_user_0007_scene_0001_cfg_0002_155_036422060215.jpg", + "images/task_0043_user_0007_scene_0001_cfg_0002_171_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0043", + "split": "eval", + "split_group_id": "task_0043_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0043_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0043_user_0007_scene_0001_cfg_0002_139_036422060215.jpg", + "images/task_0043_user_0007_scene_0001_cfg_0002_155_036422060215.jpg", + "images/task_0043_user_0007_scene_0001_cfg_0002_171_036422060215.jpg", + "images/task_0043_user_0007_scene_0001_cfg_0002_187_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0043", + "split": "eval", + "split_group_id": "task_0043_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0043_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0043_user_0007_scene_0001_cfg_0002_155_036422060215.jpg", + "images/task_0043_user_0007_scene_0001_cfg_0002_171_036422060215.jpg", + "images/task_0043_user_0007_scene_0001_cfg_0002_187_036422060215.jpg", + "images/task_0043_user_0007_scene_0001_cfg_0002_203_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0043", + "split": "eval", + "split_group_id": "task_0043_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0043_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0043_user_0007_scene_0001_cfg_0002_171_036422060215.jpg", + "images/task_0043_user_0007_scene_0001_cfg_0002_187_036422060215.jpg", + "images/task_0043_user_0007_scene_0001_cfg_0002_203_036422060215.jpg", + "images/task_0043_user_0007_scene_0001_cfg_0002_219_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0043", + "split": "eval", + "split_group_id": "task_0043_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0043_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0043_user_0007_scene_0005_cfg_0002_4_036422060215.jpg", + "images/task_0043_user_0007_scene_0005_cfg_0002_9_036422060215.jpg", + "images/task_0043_user_0007_scene_0005_cfg_0002_14_036422060215.jpg", + "images/task_0043_user_0007_scene_0005_cfg_0002_19_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0043", + "split": "eval", + "split_group_id": "task_0043_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0043_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0043_user_0007_scene_0005_cfg_0002_10_036422060215.jpg", + "images/task_0043_user_0007_scene_0005_cfg_0002_15_036422060215.jpg", + "images/task_0043_user_0007_scene_0005_cfg_0002_20_036422060215.jpg", + "images/task_0043_user_0007_scene_0005_cfg_0002_25_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0043", + "split": "eval", + "split_group_id": "task_0043_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0043_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0043_user_0007_scene_0005_cfg_0002_16_036422060215.jpg", + "images/task_0043_user_0007_scene_0005_cfg_0002_21_036422060215.jpg", + "images/task_0043_user_0007_scene_0005_cfg_0002_26_036422060215.jpg", + "images/task_0043_user_0007_scene_0005_cfg_0002_31_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0043", + "split": "eval", + "split_group_id": "task_0043_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0043_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0043_user_0007_scene_0005_cfg_0002_22_036422060215.jpg", + "images/task_0043_user_0007_scene_0005_cfg_0002_27_036422060215.jpg", + "images/task_0043_user_0007_scene_0005_cfg_0002_32_036422060215.jpg", + "images/task_0043_user_0007_scene_0005_cfg_0002_37_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0043", + "split": "eval", + "split_group_id": "task_0043_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0043_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0043_user_0007_scene_0005_cfg_0002_28_036422060215.jpg", + "images/task_0043_user_0007_scene_0005_cfg_0002_33_036422060215.jpg", + "images/task_0043_user_0007_scene_0005_cfg_0002_38_036422060215.jpg", + "images/task_0043_user_0007_scene_0005_cfg_0002_43_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0043", + "split": "eval", + "split_group_id": "task_0043_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0043_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0043_user_0007_scene_0005_cfg_0002_34_036422060215.jpg", + "images/task_0043_user_0007_scene_0005_cfg_0002_39_036422060215.jpg", + "images/task_0043_user_0007_scene_0005_cfg_0002_44_036422060215.jpg", + "images/task_0043_user_0007_scene_0005_cfg_0002_49_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0043", + "split": "eval", + "split_group_id": "task_0043_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0043_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0043_user_0007_scene_0005_cfg_0002_40_036422060215.jpg", + "images/task_0043_user_0007_scene_0005_cfg_0002_45_036422060215.jpg", + "images/task_0043_user_0007_scene_0005_cfg_0002_50_036422060215.jpg", + "images/task_0043_user_0007_scene_0005_cfg_0002_55_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0043", + "split": "eval", + "split_group_id": "task_0043_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0043_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0043_user_0007_scene_0005_cfg_0002_46_036422060215.jpg", + "images/task_0043_user_0007_scene_0005_cfg_0002_51_036422060215.jpg", + "images/task_0043_user_0007_scene_0005_cfg_0002_56_036422060215.jpg", + "images/task_0043_user_0007_scene_0005_cfg_0002_61_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0043", + "split": "eval", + "split_group_id": "task_0043_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0043_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0043_user_0007_scene_0005_cfg_0002_52_036422060215.jpg", + "images/task_0043_user_0007_scene_0005_cfg_0002_57_036422060215.jpg", + "images/task_0043_user_0007_scene_0005_cfg_0002_62_036422060215.jpg", + "images/task_0043_user_0007_scene_0005_cfg_0002_67_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0043", + "split": "eval", + "split_group_id": "task_0043_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0043_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0043_user_0007_scene_0005_cfg_0002_58_036422060215.jpg", + "images/task_0043_user_0007_scene_0005_cfg_0002_63_036422060215.jpg", + "images/task_0043_user_0007_scene_0005_cfg_0002_68_036422060215.jpg", + "images/task_0043_user_0007_scene_0005_cfg_0002_73_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0043", + "split": "eval", + "split_group_id": "task_0043_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0043_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0043_user_0007_scene_0007_cfg_0002_9_036422060215.jpg", + "images/task_0043_user_0007_scene_0007_cfg_0002_23_036422060215.jpg", + "images/task_0043_user_0007_scene_0007_cfg_0002_37_036422060215.jpg", + "images/task_0043_user_0007_scene_0007_cfg_0002_51_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0043", + "split": "eval", + "split_group_id": "task_0043_user_0007_scene_0007_cfg_0002", + "recording_id": "task_0043_user_0007_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0043_user_0007_scene_0007_cfg_0002_23_036422060215.jpg", + "images/task_0043_user_0007_scene_0007_cfg_0002_37_036422060215.jpg", + "images/task_0043_user_0007_scene_0007_cfg_0002_51_036422060215.jpg", + "images/task_0043_user_0007_scene_0007_cfg_0002_65_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0043", + "split": "eval", + "split_group_id": "task_0043_user_0007_scene_0007_cfg_0002", + "recording_id": "task_0043_user_0007_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0043_user_0007_scene_0007_cfg_0002_37_036422060215.jpg", + "images/task_0043_user_0007_scene_0007_cfg_0002_51_036422060215.jpg", + "images/task_0043_user_0007_scene_0007_cfg_0002_65_036422060215.jpg", + "images/task_0043_user_0007_scene_0007_cfg_0002_79_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0043", + "split": "eval", + "split_group_id": "task_0043_user_0007_scene_0007_cfg_0002", + "recording_id": "task_0043_user_0007_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0043_user_0007_scene_0007_cfg_0002_51_036422060215.jpg", + "images/task_0043_user_0007_scene_0007_cfg_0002_65_036422060215.jpg", + "images/task_0043_user_0007_scene_0007_cfg_0002_79_036422060215.jpg", + "images/task_0043_user_0007_scene_0007_cfg_0002_93_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0043", + "split": "eval", + "split_group_id": "task_0043_user_0007_scene_0007_cfg_0002", + "recording_id": "task_0043_user_0007_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0043_user_0007_scene_0007_cfg_0002_65_036422060215.jpg", + "images/task_0043_user_0007_scene_0007_cfg_0002_79_036422060215.jpg", + "images/task_0043_user_0007_scene_0007_cfg_0002_93_036422060215.jpg", + "images/task_0043_user_0007_scene_0007_cfg_0002_107_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0043", + "split": "eval", + "split_group_id": "task_0043_user_0007_scene_0007_cfg_0002", + "recording_id": "task_0043_user_0007_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0043_user_0007_scene_0007_cfg_0002_93_036422060215.jpg", + "images/task_0043_user_0007_scene_0007_cfg_0002_107_036422060215.jpg", + "images/task_0043_user_0007_scene_0007_cfg_0002_121_036422060215.jpg", + "images/task_0043_user_0007_scene_0007_cfg_0002_135_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0043", + "split": "eval", + "split_group_id": "task_0043_user_0007_scene_0007_cfg_0002", + "recording_id": "task_0043_user_0007_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0043_user_0007_scene_0007_cfg_0002_107_036422060215.jpg", + "images/task_0043_user_0007_scene_0007_cfg_0002_121_036422060215.jpg", + "images/task_0043_user_0007_scene_0007_cfg_0002_135_036422060215.jpg", + "images/task_0043_user_0007_scene_0007_cfg_0002_149_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0043", + "split": "eval", + "split_group_id": "task_0043_user_0007_scene_0007_cfg_0002", + "recording_id": "task_0043_user_0007_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0043_user_0007_scene_0007_cfg_0002_121_036422060215.jpg", + "images/task_0043_user_0007_scene_0007_cfg_0002_135_036422060215.jpg", + "images/task_0043_user_0007_scene_0007_cfg_0002_149_036422060215.jpg", + "images/task_0043_user_0007_scene_0007_cfg_0002_163_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0043", + "split": "eval", + "split_group_id": "task_0043_user_0007_scene_0007_cfg_0002", + "recording_id": "task_0043_user_0007_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0043_user_0007_scene_0007_cfg_0002_135_036422060215.jpg", + "images/task_0043_user_0007_scene_0007_cfg_0002_149_036422060215.jpg", + "images/task_0043_user_0007_scene_0007_cfg_0002_163_036422060215.jpg", + "images/task_0043_user_0007_scene_0007_cfg_0002_177_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0043", + "split": "eval", + "split_group_id": "task_0043_user_0007_scene_0007_cfg_0002", + "recording_id": "task_0043_user_0007_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0043_user_0007_scene_0007_cfg_0002_149_036422060215.jpg", + "images/task_0043_user_0007_scene_0007_cfg_0002_163_036422060215.jpg", + "images/task_0043_user_0007_scene_0007_cfg_0002_177_036422060215.jpg", + "images/task_0043_user_0007_scene_0007_cfg_0002_191_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0043", + "split": "eval", + "split_group_id": "task_0043_user_0007_scene_0007_cfg_0002", + "recording_id": "task_0043_user_0007_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0045_user_0007_scene_0003_cfg_0002_18_036422060215.jpg", + "images/task_0045_user_0007_scene_0003_cfg_0002_44_036422060215.jpg", + "images/task_0045_user_0007_scene_0003_cfg_0002_70_036422060215.jpg", + "images/task_0045_user_0007_scene_0003_cfg_0002_96_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0045", + "split": "eval", + "split_group_id": "task_0045_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0045_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0045_user_0007_scene_0003_cfg_0002_44_036422060215.jpg", + "images/task_0045_user_0007_scene_0003_cfg_0002_70_036422060215.jpg", + "images/task_0045_user_0007_scene_0003_cfg_0002_96_036422060215.jpg", + "images/task_0045_user_0007_scene_0003_cfg_0002_122_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0045", + "split": "eval", + "split_group_id": "task_0045_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0045_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0045_user_0007_scene_0003_cfg_0002_70_036422060215.jpg", + "images/task_0045_user_0007_scene_0003_cfg_0002_96_036422060215.jpg", + "images/task_0045_user_0007_scene_0003_cfg_0002_122_036422060215.jpg", + "images/task_0045_user_0007_scene_0003_cfg_0002_148_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0045", + "split": "eval", + "split_group_id": "task_0045_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0045_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0045_user_0007_scene_0003_cfg_0002_96_036422060215.jpg", + "images/task_0045_user_0007_scene_0003_cfg_0002_122_036422060215.jpg", + "images/task_0045_user_0007_scene_0003_cfg_0002_148_036422060215.jpg", + "images/task_0045_user_0007_scene_0003_cfg_0002_174_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0045", + "split": "eval", + "split_group_id": "task_0045_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0045_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0045_user_0007_scene_0003_cfg_0002_122_036422060215.jpg", + "images/task_0045_user_0007_scene_0003_cfg_0002_148_036422060215.jpg", + "images/task_0045_user_0007_scene_0003_cfg_0002_174_036422060215.jpg", + "images/task_0045_user_0007_scene_0003_cfg_0002_200_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0045", + "split": "eval", + "split_group_id": "task_0045_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0045_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0045_user_0007_scene_0003_cfg_0002_174_036422060215.jpg", + "images/task_0045_user_0007_scene_0003_cfg_0002_200_036422060215.jpg", + "images/task_0045_user_0007_scene_0003_cfg_0002_226_036422060215.jpg", + "images/task_0045_user_0007_scene_0003_cfg_0002_252_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0045", + "split": "eval", + "split_group_id": "task_0045_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0045_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0045_user_0007_scene_0003_cfg_0002_200_036422060215.jpg", + "images/task_0045_user_0007_scene_0003_cfg_0002_226_036422060215.jpg", + "images/task_0045_user_0007_scene_0003_cfg_0002_252_036422060215.jpg", + "images/task_0045_user_0007_scene_0003_cfg_0002_278_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0045", + "split": "eval", + "split_group_id": "task_0045_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0045_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0045_user_0007_scene_0003_cfg_0002_226_036422060215.jpg", + "images/task_0045_user_0007_scene_0003_cfg_0002_252_036422060215.jpg", + "images/task_0045_user_0007_scene_0003_cfg_0002_278_036422060215.jpg", + "images/task_0045_user_0007_scene_0003_cfg_0002_304_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0045", + "split": "eval", + "split_group_id": "task_0045_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0045_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0045_user_0007_scene_0003_cfg_0002_252_036422060215.jpg", + "images/task_0045_user_0007_scene_0003_cfg_0002_278_036422060215.jpg", + "images/task_0045_user_0007_scene_0003_cfg_0002_304_036422060215.jpg", + "images/task_0045_user_0007_scene_0003_cfg_0002_330_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0045", + "split": "eval", + "split_group_id": "task_0045_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0045_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0045_user_0007_scene_0003_cfg_0002_278_036422060215.jpg", + "images/task_0045_user_0007_scene_0003_cfg_0002_304_036422060215.jpg", + "images/task_0045_user_0007_scene_0003_cfg_0002_330_036422060215.jpg", + "images/task_0045_user_0007_scene_0003_cfg_0002_356_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0045", + "split": "eval", + "split_group_id": "task_0045_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0045_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0046_user_0007_scene_0005_cfg_0002_18_036422060215.jpg", + "images/task_0046_user_0007_scene_0005_cfg_0002_21_036422060215.jpg", + "images/task_0046_user_0007_scene_0005_cfg_0002_24_036422060215.jpg", + "images/task_0046_user_0007_scene_0005_cfg_0002_27_036422060215.jpg", + "images/task_0046_user_0007_scene_0005_cfg_0002_30_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0046_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. 
Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0046_user_0007_scene_0005_cfg_0002_419_036422060215.jpg", + "images/task_0046_user_0007_scene_0005_cfg_0002_422_036422060215.jpg", + "images/task_0046_user_0007_scene_0005_cfg_0002_425_036422060215.jpg", + "images/task_0046_user_0007_scene_0005_cfg_0002_428_036422060215.jpg", + "images/task_0046_user_0007_scene_0005_cfg_0002_431_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0046_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0046_user_0007_scene_0005_cfg_0002_476_036422060215.jpg", + "images/task_0046_user_0007_scene_0005_cfg_0002_479_036422060215.jpg", + "images/task_0046_user_0007_scene_0005_cfg_0002_482_036422060215.jpg", + "images/task_0046_user_0007_scene_0005_cfg_0002_485_036422060215.jpg", + "images/task_0046_user_0007_scene_0005_cfg_0002_488_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0046_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0046_user_0007_scene_0005_cfg_0002_10_036422060215.jpg", + "images/task_0046_user_0007_scene_0005_cfg_0002_25_036422060215.jpg", + "images/task_0046_user_0007_scene_0005_cfg_0002_40_036422060215.jpg", + "images/task_0046_user_0007_scene_0005_cfg_0002_55_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0046_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. 
Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0046_user_0007_scene_0005_cfg_0002_40_036422060215.jpg", + "images/task_0046_user_0007_scene_0005_cfg_0002_55_036422060215.jpg", + "images/task_0046_user_0007_scene_0005_cfg_0002_70_036422060215.jpg", + "images/task_0046_user_0007_scene_0005_cfg_0002_85_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0046_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0046_user_0007_scene_0005_cfg_0002_70_036422060215.jpg", + "images/task_0046_user_0007_scene_0005_cfg_0002_85_036422060215.jpg", + "images/task_0046_user_0007_scene_0005_cfg_0002_100_036422060215.jpg", + "images/task_0046_user_0007_scene_0005_cfg_0002_115_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0046_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. 
No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0046_user_0007_scene_0005_cfg_0002_100_036422060215.jpg", + "images/task_0046_user_0007_scene_0005_cfg_0002_115_036422060215.jpg", + "images/task_0046_user_0007_scene_0005_cfg_0002_130_036422060215.jpg", + "images/task_0046_user_0007_scene_0005_cfg_0002_145_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0046_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0046_user_0007_scene_0005_cfg_0002_130_036422060215.jpg", + "images/task_0046_user_0007_scene_0005_cfg_0002_145_036422060215.jpg", + "images/task_0046_user_0007_scene_0005_cfg_0002_160_036422060215.jpg", + "images/task_0046_user_0007_scene_0005_cfg_0002_175_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0046_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. 
Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0046_user_0007_scene_0005_cfg_0002_160_036422060215.jpg", + "images/task_0046_user_0007_scene_0005_cfg_0002_175_036422060215.jpg", + "images/task_0046_user_0007_scene_0005_cfg_0002_190_036422060215.jpg", + "images/task_0046_user_0007_scene_0005_cfg_0002_205_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0046_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0046_user_0007_scene_0005_cfg_0002_344_036422060215.jpg", + "images/task_0046_user_0007_scene_0005_cfg_0002_127_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0046_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0046_user_0007_scene_0005_cfg_0002_344_036422060215.jpg", + "images/task_0046_user_0007_scene_0005_cfg_0002_449_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0046_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0046_user_0007_scene_0005_cfg_0002_43_036422060215.jpg", + "images/task_0046_user_0007_scene_0005_cfg_0002_32_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0046_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0046_user_0007_scene_0005_cfg_0002_114_036422060215.jpg", + "images/task_0046_user_0007_scene_0005_cfg_0002_127_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0046_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0046_user_0007_scene_0005_cfg_0002_43_036422060215.jpg", + "images/task_0046_user_0007_scene_0005_cfg_0002_114_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0046_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_0046_user_0007_scene_0005_cfg_0002_452_036422060215.jpg", + "images/task_0046_user_0007_scene_0005_cfg_0002_396_036422060215.jpg", + "images/task_0046_user_0007_scene_0005_cfg_0002_340_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0046_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0005_cfg_0002_14_036422060215.jpg", + "images/task_0046_user_0016_scene_0005_cfg_0002_35_036422060215.jpg", + "images/task_0046_user_0016_scene_0005_cfg_0002_56_036422060215.jpg", + "images/task_0046_user_0016_scene_0005_cfg_0002_77_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0005_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0005_cfg_0002_35_036422060215.jpg", + "images/task_0046_user_0016_scene_0005_cfg_0002_56_036422060215.jpg", + "images/task_0046_user_0016_scene_0005_cfg_0002_77_036422060215.jpg", + "images/task_0046_user_0016_scene_0005_cfg_0002_98_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0005_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0005_cfg_0002_56_036422060215.jpg", + "images/task_0046_user_0016_scene_0005_cfg_0002_77_036422060215.jpg", + "images/task_0046_user_0016_scene_0005_cfg_0002_98_036422060215.jpg", + "images/task_0046_user_0016_scene_0005_cfg_0002_119_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0005_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0005_cfg_0002_77_036422060215.jpg", + "images/task_0046_user_0016_scene_0005_cfg_0002_98_036422060215.jpg", + "images/task_0046_user_0016_scene_0005_cfg_0002_119_036422060215.jpg", + "images/task_0046_user_0016_scene_0005_cfg_0002_140_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0005_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0005_cfg_0002_98_036422060215.jpg", + "images/task_0046_user_0016_scene_0005_cfg_0002_119_036422060215.jpg", + "images/task_0046_user_0016_scene_0005_cfg_0002_140_036422060215.jpg", + "images/task_0046_user_0016_scene_0005_cfg_0002_161_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0005_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0005_cfg_0002_140_036422060215.jpg", + "images/task_0046_user_0016_scene_0005_cfg_0002_161_036422060215.jpg", + "images/task_0046_user_0016_scene_0005_cfg_0002_182_036422060215.jpg", + "images/task_0046_user_0016_scene_0005_cfg_0002_203_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0005_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0005_cfg_0002_161_036422060215.jpg", + "images/task_0046_user_0016_scene_0005_cfg_0002_182_036422060215.jpg", + "images/task_0046_user_0016_scene_0005_cfg_0002_203_036422060215.jpg", + "images/task_0046_user_0016_scene_0005_cfg_0002_224_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0005_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0005_cfg_0002_182_036422060215.jpg", + "images/task_0046_user_0016_scene_0005_cfg_0002_203_036422060215.jpg", + "images/task_0046_user_0016_scene_0005_cfg_0002_224_036422060215.jpg", + "images/task_0046_user_0016_scene_0005_cfg_0002_245_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0005_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0005_cfg_0002_203_036422060215.jpg", + "images/task_0046_user_0016_scene_0005_cfg_0002_224_036422060215.jpg", + "images/task_0046_user_0016_scene_0005_cfg_0002_245_036422060215.jpg", + "images/task_0046_user_0016_scene_0005_cfg_0002_266_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0005_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0005_cfg_0002_224_036422060215.jpg", + "images/task_0046_user_0016_scene_0005_cfg_0002_245_036422060215.jpg", + "images/task_0046_user_0016_scene_0005_cfg_0002_266_036422060215.jpg", + "images/task_0046_user_0016_scene_0005_cfg_0002_287_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0005_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. release\nC. hold and carry\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. pre-approach" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_138_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. approach\nC. release\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
pre-approach" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_156_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. hold and carry\nC. pre-approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. pre-approach" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_174_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. pre-approach\nC. approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
transfer" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_198_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. hold and carry\nC. transfer\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. approach" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_198_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. transfer\nC. release\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
release" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_213_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. transfer\nC. hold and carry\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. approach" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_274_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. approach\nC. pre-approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
approach" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_292_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. transfer\nC. approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. contact" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_300_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. hold and carry\nC. approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
hold and carry" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_318_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. contact\nC. release\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. contact" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_395_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. contact\nC. hold and carry\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
transfer" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_398_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. approach\nC. contact\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. hold and carry" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_432_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. approach\nC. pre-approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
hold and carry" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_468_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. hold and carry\nC. contact\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. release" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_483_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. contact\nC. release\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
contact" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_556_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. transfer\nC. contact\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. release" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_569_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. release\nC. contact\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
transfer" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_593_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_138_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_156_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. 
No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_174_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_198_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — no contact" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_274_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_292_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — contact established" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_300_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_318_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — contact established" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_395_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_432_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — contact established" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_468_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_556_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Right\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Left" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_30_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_40_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_50_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_60_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Left" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_40_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_50_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_60_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_70_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Down\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Up" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_90_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_100_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_110_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_120_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Left\nC. Right\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Up" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_100_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_110_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_120_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_130_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Down\nC. Right\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Up" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_110_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_120_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_130_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_140_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Down\nC. Right\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Left" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_180_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_190_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_200_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_210_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Right" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_200_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_210_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_220_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_230_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Left\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Right" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_210_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_220_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_230_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_240_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Left\nC. Up\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Right" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_230_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_240_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_250_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_260_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Left\nC. Down\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Down" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_330_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_340_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_350_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_360_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Down\nC. Up\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Down" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_340_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_350_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_360_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_370_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Left\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Down" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_380_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_390_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_400_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_410_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_132_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_135_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_138_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_141_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_144_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_168_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_171_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_174_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_177_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_180_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_192_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_195_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_198_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_201_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_204_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_462_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_465_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_468_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_471_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_474_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_587_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_590_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_593_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_596_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_599_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_12_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_30_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_48_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_66_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. 
No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_48_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_66_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_84_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_102_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_84_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_102_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_120_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_138_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. 
Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_120_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_138_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_156_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_174_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_156_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_174_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_192_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_210_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. 
Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_192_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_210_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_228_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_246_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_467_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_421_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_467_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_568_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_589_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_568_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_333_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_284_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_205_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_216_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_284_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_216_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_421_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_333_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_200_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_203_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_206_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_209_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_212_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_204_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_207_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_210_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_213_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_216_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_209_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_212_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_215_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_218_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_221_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_283_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_286_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_289_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_292_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_295_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_302_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_305_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_308_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_311_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_314_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_322_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_325_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_328_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_331_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_334_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_420_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_423_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_426_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_429_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_432_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_438_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_441_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_444_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_447_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_450_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_456_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_459_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_462_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_465_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_468_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_564_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_567_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_570_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_573_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_576_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_572_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_575_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_578_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_581_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_584_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_581_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_584_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_587_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_590_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_593_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_590_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_578_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_567_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_335_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_308_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_282_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_0046_user_0016_scene_0009_cfg_0002_444_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_469_036422060215.jpg", + "images/task_0046_user_0016_scene_0009_cfg_0002_419_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0046", + "split": "eval", + "split_group_id": "task_0046_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0046_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0047_user_0007_scene_0010_cfg_0002_8_036422060215.jpg", + "images/task_0047_user_0007_scene_0010_cfg_0002_20_036422060215.jpg", + "images/task_0047_user_0007_scene_0010_cfg_0002_32_036422060215.jpg", + "images/task_0047_user_0007_scene_0010_cfg_0002_44_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0047", + "split": "eval", + "split_group_id": "task_0047_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0047_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0047_user_0007_scene_0010_cfg_0002_20_036422060215.jpg", + "images/task_0047_user_0007_scene_0010_cfg_0002_32_036422060215.jpg", + "images/task_0047_user_0007_scene_0010_cfg_0002_44_036422060215.jpg", + "images/task_0047_user_0007_scene_0010_cfg_0002_56_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0047", + "split": "eval", + "split_group_id": "task_0047_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0047_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0047_user_0007_scene_0010_cfg_0002_32_036422060215.jpg", + "images/task_0047_user_0007_scene_0010_cfg_0002_44_036422060215.jpg", + "images/task_0047_user_0007_scene_0010_cfg_0002_56_036422060215.jpg", + "images/task_0047_user_0007_scene_0010_cfg_0002_68_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0047", + "split": "eval", + "split_group_id": "task_0047_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0047_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0047_user_0007_scene_0010_cfg_0002_44_036422060215.jpg", + "images/task_0047_user_0007_scene_0010_cfg_0002_56_036422060215.jpg", + "images/task_0047_user_0007_scene_0010_cfg_0002_68_036422060215.jpg", + "images/task_0047_user_0007_scene_0010_cfg_0002_80_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0047", + "split": "eval", + "split_group_id": "task_0047_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0047_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0047_user_0007_scene_0010_cfg_0002_56_036422060215.jpg", + "images/task_0047_user_0007_scene_0010_cfg_0002_68_036422060215.jpg", + "images/task_0047_user_0007_scene_0010_cfg_0002_80_036422060215.jpg", + "images/task_0047_user_0007_scene_0010_cfg_0002_92_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0047", + "split": "eval", + "split_group_id": "task_0047_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0047_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0047_user_0007_scene_0010_cfg_0002_80_036422060215.jpg", + "images/task_0047_user_0007_scene_0010_cfg_0002_92_036422060215.jpg", + "images/task_0047_user_0007_scene_0010_cfg_0002_104_036422060215.jpg", + "images/task_0047_user_0007_scene_0010_cfg_0002_116_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0047", + "split": "eval", + "split_group_id": "task_0047_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0047_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0047_user_0007_scene_0010_cfg_0002_92_036422060215.jpg", + "images/task_0047_user_0007_scene_0010_cfg_0002_104_036422060215.jpg", + "images/task_0047_user_0007_scene_0010_cfg_0002_116_036422060215.jpg", + "images/task_0047_user_0007_scene_0010_cfg_0002_128_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0047", + "split": "eval", + "split_group_id": "task_0047_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0047_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0047_user_0007_scene_0010_cfg_0002_104_036422060215.jpg", + "images/task_0047_user_0007_scene_0010_cfg_0002_116_036422060215.jpg", + "images/task_0047_user_0007_scene_0010_cfg_0002_128_036422060215.jpg", + "images/task_0047_user_0007_scene_0010_cfg_0002_140_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0047", + "split": "eval", + "split_group_id": "task_0047_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0047_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0047_user_0007_scene_0010_cfg_0002_116_036422060215.jpg", + "images/task_0047_user_0007_scene_0010_cfg_0002_128_036422060215.jpg", + "images/task_0047_user_0007_scene_0010_cfg_0002_140_036422060215.jpg", + "images/task_0047_user_0007_scene_0010_cfg_0002_152_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0047", + "split": "eval", + "split_group_id": "task_0047_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0047_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0047_user_0007_scene_0010_cfg_0002_128_036422060215.jpg", + "images/task_0047_user_0007_scene_0010_cfg_0002_140_036422060215.jpg", + "images/task_0047_user_0007_scene_0010_cfg_0002_152_036422060215.jpg", + "images/task_0047_user_0007_scene_0010_cfg_0002_164_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0047", + "split": "eval", + "split_group_id": "task_0047_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0047_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. hold and carry\nC. transfer\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
pre-approach" + } + ], + "images": [ + "images/task_0047_user_0016_scene_0010_cfg_0002_174_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0047", + "split": "eval", + "split_group_id": "task_0047_user_0016_scene_0010_cfg_0002", + "recording_id": "task_0047_user_0016_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. transfer\nC. release\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. pre-approach" + } + ], + "images": [ + "images/task_0047_user_0016_scene_0010_cfg_0002_192_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0047", + "split": "eval", + "split_group_id": "task_0047_user_0016_scene_0010_cfg_0002", + "recording_id": "task_0047_user_0016_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. transfer\nC. approach\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
contact" + } + ], + "images": [ + "images/task_0047_user_0016_scene_0010_cfg_0002_224_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0047", + "split": "eval", + "split_group_id": "task_0047_user_0016_scene_0010_cfg_0002", + "recording_id": "task_0047_user_0016_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. approach\nC. transfer\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. hold and carry" + } + ], + "images": [ + "images/task_0047_user_0016_scene_0010_cfg_0002_232_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0047", + "split": "eval", + "split_group_id": "task_0047_user_0016_scene_0010_cfg_0002", + "recording_id": "task_0047_user_0016_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. pre-approach\nC. contact\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
transfer" + } + ], + "images": [ + "images/task_0047_user_0016_scene_0010_cfg_0002_246_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0047", + "split": "eval", + "split_group_id": "task_0047_user_0016_scene_0010_cfg_0002", + "recording_id": "task_0047_user_0016_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. transfer\nC. pre-approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. transfer" + } + ], + "images": [ + "images/task_0047_user_0016_scene_0010_cfg_0002_282_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0047", + "split": "eval", + "split_group_id": "task_0047_user_0016_scene_0010_cfg_0002", + "recording_id": "task_0047_user_0016_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. hold and carry\nC. contact\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
transfer" + } + ], + "images": [ + "images/task_0047_user_0016_scene_0010_cfg_0002_300_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0047", + "split": "eval", + "split_group_id": "task_0047_user_0016_scene_0010_cfg_0002", + "recording_id": "task_0047_user_0016_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. hold and carry\nC. release\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. hold and carry" + } + ], + "images": [ + "images/task_0047_user_0016_scene_0010_cfg_0002_310_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0047", + "split": "eval", + "split_group_id": "task_0047_user_0016_scene_0010_cfg_0002", + "recording_id": "task_0047_user_0016_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. pre-approach\nC. approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
release" + } + ], + "images": [ + "images/task_0047_user_0016_scene_0010_cfg_0002_324_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0047", + "split": "eval", + "split_group_id": "task_0047_user_0016_scene_0010_cfg_0002", + "recording_id": "task_0047_user_0016_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/task_0047_user_0016_scene_0010_cfg_0002_0_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0047", + "split": "eval", + "split_group_id": "task_0047_user_0016_scene_0010_cfg_0002", + "recording_id": "task_0047_user_0016_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/task_0047_user_0016_scene_0010_cfg_0002_36_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0047", + "split": "eval", + "split_group_id": "task_0047_user_0016_scene_0010_cfg_0002", + "recording_id": "task_0047_user_0016_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. 
No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_0047_user_0016_scene_0010_cfg_0002_145_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0047", + "split": "eval", + "split_group_id": "task_0047_user_0016_scene_0010_cfg_0002", + "recording_id": "task_0047_user_0016_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_0047_user_0016_scene_0010_cfg_0002_174_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0047", + "split": "eval", + "split_group_id": "task_0047_user_0016_scene_0010_cfg_0002", + "recording_id": "task_0047_user_0016_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — no contact" + } + ], + "images": [ + "images/task_0047_user_0016_scene_0010_cfg_0002_210_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0047", + "split": "eval", + "split_group_id": "task_0047_user_0016_scene_0010_cfg_0002", + "recording_id": "task_0047_user_0016_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/task_0047_user_0016_scene_0010_cfg_0002_224_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0047", + "split": "eval", + "split_group_id": "task_0047_user_0016_scene_0010_cfg_0002", + "recording_id": "task_0047_user_0016_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — contact established" + } + ], + "images": [ + "images/task_0047_user_0016_scene_0010_cfg_0002_242_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0047", + "split": "eval", + "split_group_id": "task_0047_user_0016_scene_0010_cfg_0002", + "recording_id": "task_0047_user_0016_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/task_0047_user_0016_scene_0010_cfg_0002_260_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0047", + "split": "eval", + "split_group_id": "task_0047_user_0016_scene_0010_cfg_0002", + "recording_id": "task_0047_user_0016_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — contact established" + } + ], + "images": [ + "images/task_0047_user_0016_scene_0010_cfg_0002_296_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0047", + "split": "eval", + "split_group_id": "task_0047_user_0016_scene_0010_cfg_0002", + "recording_id": "task_0047_user_0016_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/task_0047_user_0016_scene_0010_cfg_0002_314_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0047", + "split": "eval", + "split_group_id": "task_0047_user_0016_scene_0010_cfg_0002", + "recording_id": "task_0047_user_0016_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — contact established" + } + ], + "images": [ + "images/task_0047_user_0016_scene_0010_cfg_0002_332_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0047", + "split": "eval", + "split_group_id": "task_0047_user_0016_scene_0010_cfg_0002", + "recording_id": "task_0047_user_0016_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/task_0047_user_0016_scene_0010_cfg_0002_353_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0047", + "split": "eval", + "split_group_id": "task_0047_user_0016_scene_0010_cfg_0002", + "recording_id": "task_0047_user_0016_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Down\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Left" + } + ], + "images": [ + "images/task_0047_user_0016_scene_0010_cfg_0002_10_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_20_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_30_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_40_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0047", + "split": "eval", + "split_group_id": "task_0047_user_0016_scene_0010_cfg_0002", + "recording_id": "task_0047_user_0016_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Left" + } + ], + "images": [ + "images/task_0047_user_0016_scene_0010_cfg_0002_20_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_30_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_40_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_50_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0047", + "split": "eval", + "split_group_id": "task_0047_user_0016_scene_0010_cfg_0002", + "recording_id": "task_0047_user_0016_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Right\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Left" + } + ], + "images": [ + "images/task_0047_user_0016_scene_0010_cfg_0002_30_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_40_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_50_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_60_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0047", + "split": "eval", + "split_group_id": "task_0047_user_0016_scene_0010_cfg_0002", + "recording_id": "task_0047_user_0016_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Up\nC. Down\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Up" + } + ], + "images": [ + "images/task_0047_user_0016_scene_0010_cfg_0002_120_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_130_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_140_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_150_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0047", + "split": "eval", + "split_group_id": "task_0047_user_0016_scene_0010_cfg_0002", + "recording_id": "task_0047_user_0016_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Right\nC. Left\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Up" + } + ], + "images": [ + "images/task_0047_user_0016_scene_0010_cfg_0002_130_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_140_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_150_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_160_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0047", + "split": "eval", + "split_group_id": "task_0047_user_0016_scene_0010_cfg_0002", + "recording_id": "task_0047_user_0016_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Right\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Up" + } + ], + "images": [ + "images/task_0047_user_0016_scene_0010_cfg_0002_140_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_150_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_160_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_170_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0047", + "split": "eval", + "split_group_id": "task_0047_user_0016_scene_0010_cfg_0002", + "recording_id": "task_0047_user_0016_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Left\nC. Right\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Right" + } + ], + "images": [ + "images/task_0047_user_0016_scene_0010_cfg_0002_240_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_250_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_260_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_270_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0047", + "split": "eval", + "split_group_id": "task_0047_user_0016_scene_0010_cfg_0002", + "recording_id": "task_0047_user_0016_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Left\nC. Down\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Right" + } + ], + "images": [ + "images/task_0047_user_0016_scene_0010_cfg_0002_250_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_260_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_270_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_280_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0047", + "split": "eval", + "split_group_id": "task_0047_user_0016_scene_0010_cfg_0002", + "recording_id": "task_0047_user_0016_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Left\nC. Up\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Right" + } + ], + "images": [ + "images/task_0047_user_0016_scene_0010_cfg_0002_260_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_270_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_280_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_290_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0047", + "split": "eval", + "split_group_id": "task_0047_user_0016_scene_0010_cfg_0002", + "recording_id": "task_0047_user_0016_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0047_user_0016_scene_0010_cfg_0002_186_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_189_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_192_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_195_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_198_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0047", + "split": "eval", + "split_group_id": "task_0047_user_0016_scene_0010_cfg_0002", + "recording_id": "task_0047_user_0016_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. 
Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0047_user_0016_scene_0010_cfg_0002_276_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_279_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_282_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_285_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_288_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0047", + "split": "eval", + "split_group_id": "task_0047_user_0016_scene_0010_cfg_0002", + "recording_id": "task_0047_user_0016_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0047_user_0016_scene_0010_cfg_0002_294_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_297_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_300_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_303_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_306_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0047", + "split": "eval", + "split_group_id": "task_0047_user_0016_scene_0010_cfg_0002", + "recording_id": "task_0047_user_0016_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0047_user_0016_scene_0010_cfg_0002_7_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_18_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_29_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_40_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0047", + "split": "eval", + "split_group_id": "task_0047_user_0016_scene_0010_cfg_0002", + "recording_id": "task_0047_user_0016_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. 
Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0047_user_0016_scene_0010_cfg_0002_29_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_40_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_51_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_62_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0047", + "split": "eval", + "split_group_id": "task_0047_user_0016_scene_0010_cfg_0002", + "recording_id": "task_0047_user_0016_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0047_user_0016_scene_0010_cfg_0002_51_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_62_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_73_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_84_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0047", + "split": "eval", + "split_group_id": "task_0047_user_0016_scene_0010_cfg_0002", + "recording_id": "task_0047_user_0016_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. 
Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0047_user_0016_scene_0010_cfg_0002_73_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_84_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_95_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_106_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0047", + "split": "eval", + "split_group_id": "task_0047_user_0016_scene_0010_cfg_0002", + "recording_id": "task_0047_user_0016_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0047_user_0016_scene_0010_cfg_0002_95_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_106_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_117_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_128_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0047", + "split": "eval", + "split_group_id": "task_0047_user_0016_scene_0010_cfg_0002", + "recording_id": "task_0047_user_0016_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. 
No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0047_user_0016_scene_0010_cfg_0002_117_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_128_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_139_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_150_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0047", + "split": "eval", + "split_group_id": "task_0047_user_0016_scene_0010_cfg_0002", + "recording_id": "task_0047_user_0016_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0047_user_0016_scene_0010_cfg_0002_253_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_305_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0047", + "split": "eval", + "split_group_id": "task_0047_user_0016_scene_0010_cfg_0002", + "recording_id": "task_0047_user_0016_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0047_user_0016_scene_0010_cfg_0002_252_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_255_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_258_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_261_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_264_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0047", + "split": "eval", + "split_group_id": "task_0047_user_0016_scene_0010_cfg_0002", + "recording_id": "task_0047_user_0016_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0047_user_0016_scene_0010_cfg_0002_273_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_276_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_279_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_282_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_285_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0047", + "split": "eval", + "split_group_id": "task_0047_user_0016_scene_0010_cfg_0002", + "recording_id": "task_0047_user_0016_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0047_user_0016_scene_0010_cfg_0002_294_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_297_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_300_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_303_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_306_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0047", + "split": "eval", + "split_group_id": "task_0047_user_0016_scene_0010_cfg_0002", + "recording_id": "task_0047_user_0016_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_0047_user_0016_scene_0010_cfg_0002_251_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_279_036422060215.jpg", + "images/task_0047_user_0016_scene_0010_cfg_0002_307_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0047", + "split": "eval", + "split_group_id": "task_0047_user_0016_scene_0010_cfg_0002", + "recording_id": "task_0047_user_0016_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0049_user_0007_scene_0006_cfg_0002_10_036422060215.jpg", + "images/task_0049_user_0007_scene_0006_cfg_0002_24_036422060215.jpg", + "images/task_0049_user_0007_scene_0006_cfg_0002_38_036422060215.jpg", + "images/task_0049_user_0007_scene_0006_cfg_0002_52_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0049", + "split": "eval", + "split_group_id": "task_0049_user_0007_scene_0006_cfg_0002", + "recording_id": "task_0049_user_0007_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0049_user_0007_scene_0006_cfg_0002_24_036422060215.jpg", + "images/task_0049_user_0007_scene_0006_cfg_0002_38_036422060215.jpg", + "images/task_0049_user_0007_scene_0006_cfg_0002_52_036422060215.jpg", + "images/task_0049_user_0007_scene_0006_cfg_0002_66_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0049", + "split": "eval", + "split_group_id": "task_0049_user_0007_scene_0006_cfg_0002", + "recording_id": "task_0049_user_0007_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0049_user_0007_scene_0006_cfg_0002_38_036422060215.jpg", + "images/task_0049_user_0007_scene_0006_cfg_0002_52_036422060215.jpg", + "images/task_0049_user_0007_scene_0006_cfg_0002_66_036422060215.jpg", + "images/task_0049_user_0007_scene_0006_cfg_0002_80_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0049", + "split": "eval", + "split_group_id": "task_0049_user_0007_scene_0006_cfg_0002", + "recording_id": "task_0049_user_0007_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0049_user_0007_scene_0006_cfg_0002_52_036422060215.jpg", + "images/task_0049_user_0007_scene_0006_cfg_0002_66_036422060215.jpg", + "images/task_0049_user_0007_scene_0006_cfg_0002_80_036422060215.jpg", + "images/task_0049_user_0007_scene_0006_cfg_0002_94_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0049", + "split": "eval", + "split_group_id": "task_0049_user_0007_scene_0006_cfg_0002", + "recording_id": "task_0049_user_0007_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0049_user_0007_scene_0006_cfg_0002_66_036422060215.jpg", + "images/task_0049_user_0007_scene_0006_cfg_0002_80_036422060215.jpg", + "images/task_0049_user_0007_scene_0006_cfg_0002_94_036422060215.jpg", + "images/task_0049_user_0007_scene_0006_cfg_0002_108_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0049", + "split": "eval", + "split_group_id": "task_0049_user_0007_scene_0006_cfg_0002", + "recording_id": "task_0049_user_0007_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0049_user_0007_scene_0006_cfg_0002_94_036422060215.jpg", + "images/task_0049_user_0007_scene_0006_cfg_0002_108_036422060215.jpg", + "images/task_0049_user_0007_scene_0006_cfg_0002_122_036422060215.jpg", + "images/task_0049_user_0007_scene_0006_cfg_0002_136_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0049", + "split": "eval", + "split_group_id": "task_0049_user_0007_scene_0006_cfg_0002", + "recording_id": "task_0049_user_0007_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0049_user_0007_scene_0006_cfg_0002_108_036422060215.jpg", + "images/task_0049_user_0007_scene_0006_cfg_0002_122_036422060215.jpg", + "images/task_0049_user_0007_scene_0006_cfg_0002_136_036422060215.jpg", + "images/task_0049_user_0007_scene_0006_cfg_0002_150_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0049", + "split": "eval", + "split_group_id": "task_0049_user_0007_scene_0006_cfg_0002", + "recording_id": "task_0049_user_0007_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0049_user_0007_scene_0006_cfg_0002_122_036422060215.jpg", + "images/task_0049_user_0007_scene_0006_cfg_0002_136_036422060215.jpg", + "images/task_0049_user_0007_scene_0006_cfg_0002_150_036422060215.jpg", + "images/task_0049_user_0007_scene_0006_cfg_0002_164_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0049", + "split": "eval", + "split_group_id": "task_0049_user_0007_scene_0006_cfg_0002", + "recording_id": "task_0049_user_0007_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0049_user_0007_scene_0006_cfg_0002_136_036422060215.jpg", + "images/task_0049_user_0007_scene_0006_cfg_0002_150_036422060215.jpg", + "images/task_0049_user_0007_scene_0006_cfg_0002_164_036422060215.jpg", + "images/task_0049_user_0007_scene_0006_cfg_0002_178_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0049", + "split": "eval", + "split_group_id": "task_0049_user_0007_scene_0006_cfg_0002", + "recording_id": "task_0049_user_0007_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0049_user_0007_scene_0006_cfg_0002_150_036422060215.jpg", + "images/task_0049_user_0007_scene_0006_cfg_0002_164_036422060215.jpg", + "images/task_0049_user_0007_scene_0006_cfg_0002_178_036422060215.jpg", + "images/task_0049_user_0007_scene_0006_cfg_0002_192_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0049", + "split": "eval", + "split_group_id": "task_0049_user_0007_scene_0006_cfg_0002", + "recording_id": "task_0049_user_0007_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. approach\nC. pre-approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
pre-approach"
      }
    ],
    "images": [
      "images/task_0049_user_0016_scene_0009_cfg_0002_20_036422060215.jpg"
    ],
    "dataset": "RH20T",
    "source_dataset": "RH20T",
    "task_type": "T1",
    "task_id": "task_0049",
    "split": "eval",
    "split_group_id": "task_0049_user_0016_scene_0009_cfg_0002",
    "recording_id": "task_0049_user_0016_scene_0009_cfg_0002",
    "episode_id": null,
    "camera": "036422060215",
    "answer": "C"
  },
  {
    "conversations": [
      {
        "from": "human",
        "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. approach\nC. contact\nD. hold and carry\nAnswer with the option label and text."
      },
      {
        "from": "gpt",
        "value": "A. pre-approach"
      }
    ],
    "images": [
      "images/task_0049_user_0016_scene_0009_cfg_0002_38_036422060215.jpg"
    ],
    "dataset": "RH20T",
    "source_dataset": "RH20T",
    "task_type": "T1",
    "task_id": "task_0049",
    "split": "eval",
    "split_group_id": "task_0049_user_0016_scene_0009_cfg_0002",
    "recording_id": "task_0049_user_0016_scene_0009_cfg_0002",
    "episode_id": null,
    "camera": "036422060215",
    "answer": "A"
  },
  {
    "conversations": [
      {
        "from": "human",
        "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. release\nC. transfer\nD. contact\nAnswer with the option label and text."
      },
      {
        "from": "gpt",
        "value": "A. pre-approach"
      }
    ],
    "images": [
      "images/task_0049_user_0016_scene_0009_cfg_0002_56_036422060215.jpg"
    ],
    "dataset": "RH20T",
    "source_dataset": "RH20T",
    "task_type": "T1",
    "task_id": "task_0049",
    "split": "eval",
    "split_group_id": "task_0049_user_0016_scene_0009_cfg_0002",
    "recording_id": "task_0049_user_0016_scene_0009_cfg_0002",
    "episode_id": null,
    "camera": "036422060215",
    "answer": "A"
  },
  {
    "conversations": [
      {
        "from": "human",
        "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. contact\nC. approach\nD. hold and carry\nAnswer with the option label and text."
      },
      {
        "from": "gpt",
        "value": "C. approach"
      }
    ],
    "images": [
      "images/task_0049_user_0016_scene_0009_cfg_0002_80_036422060215.jpg"
    ],
    "dataset": "RH20T",
    "source_dataset": "RH20T",
    "task_type": "T1",
    "task_id": "task_0049",
    "split": "eval",
    "split_group_id": "task_0049_user_0016_scene_0009_cfg_0002",
    "recording_id": "task_0049_user_0016_scene_0009_cfg_0002",
    "episode_id": null,
    "camera": "036422060215",
    "answer": "C"
  },
  {
    "conversations": [
      {
        "from": "human",
        "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. pre-approach\nC. hold and carry\nD. transfer\nAnswer with the option label and text."
      },
      {
        "from": "gpt",
        "value": "D. 
transfer"
      }
    ],
    "images": [
      "images/task_0049_user_0016_scene_0009_cfg_0002_80_036422060215.jpg"
    ],
    "dataset": "RH20T",
    "source_dataset": "RH20T",
    "task_type": "T1",
    "task_id": "task_0049",
    "split": "eval",
    "split_group_id": "task_0049_user_0016_scene_0009_cfg_0002",
    "recording_id": "task_0049_user_0016_scene_0009_cfg_0002",
    "episode_id": null,
    "camera": "036422060215",
    "answer": "D"
  },
  {
    "conversations": [
      {
        "from": "human",
        "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. contact\nC. release\nD. approach\nAnswer with the option label and text."
      },
      {
        "from": "gpt",
        "value": "D. approach"
      }
    ],
    "images": [
      "images/task_0049_user_0016_scene_0009_cfg_0002_152_036422060215.jpg"
    ],
    "dataset": "RH20T",
    "source_dataset": "RH20T",
    "task_type": "T1",
    "task_id": "task_0049",
    "split": "eval",
    "split_group_id": "task_0049_user_0016_scene_0009_cfg_0002",
    "recording_id": "task_0049_user_0016_scene_0009_cfg_0002",
    "episode_id": null,
    "camera": "036422060215",
    "answer": "D"
  },
  {
    "conversations": [
      {
        "from": "human",
        "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. contact\nC. approach\nD. release\nAnswer with the option label and text."
      },
      {
        "from": "gpt",
        "value": "B. contact"
      }
    ],
    "images": [
      "images/task_0049_user_0016_scene_0009_cfg_0002_182_036422060215.jpg"
    ],
    "dataset": "RH20T",
    "source_dataset": "RH20T",
    "task_type": "T1",
    "task_id": "task_0049",
    "split": "eval",
    "split_group_id": "task_0049_user_0016_scene_0009_cfg_0002",
    "recording_id": "task_0049_user_0016_scene_0009_cfg_0002",
    "episode_id": null,
    "camera": "036422060215",
    "answer": "B"
  },
  {
    "conversations": [
      {
        "from": "human",
        "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. approach\nC. pre-approach\nD. hold and carry\nAnswer with the option label and text."
      },
      {
        "from": "gpt",
        "value": "D. hold and carry"
      }
    ],
    "images": [
      "images/task_0049_user_0016_scene_0009_cfg_0002_200_036422060215.jpg"
    ],
    "dataset": "RH20T",
    "source_dataset": "RH20T",
    "task_type": "T1",
    "task_id": "task_0049",
    "split": "eval",
    "split_group_id": "task_0049_user_0016_scene_0009_cfg_0002",
    "recording_id": "task_0049_user_0016_scene_0009_cfg_0002",
    "episode_id": null,
    "camera": "036422060215",
    "answer": "D"
  },
  {
    "conversations": [
      {
        "from": "human",
        "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. approach\nC. hold and carry\nD. transfer\nAnswer with the option label and text."
      },
      {
        "from": "gpt",
        "value": "C. 
hold and carry" + } + ], + "images": [ + "images/task_0049_user_0016_scene_0009_cfg_0002_253_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0049", + "split": "eval", + "split_group_id": "task_0049_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0049_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. transfer\nC. pre-approach\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. release" + } + ], + "images": [ + "images/task_0049_user_0016_scene_0009_cfg_0002_266_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0049", + "split": "eval", + "split_group_id": "task_0049_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0049_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. contact\nC. release\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
transfer" + } + ], + "images": [ + "images/task_0049_user_0016_scene_0009_cfg_0002_302_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0049", + "split": "eval", + "split_group_id": "task_0049_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0049_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. hold and carry\nC. release\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. release" + } + ], + "images": [ + "images/task_0049_user_0016_scene_0009_cfg_0002_317_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0049", + "split": "eval", + "split_group_id": "task_0049_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0049_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. hold and carry\nC. approach\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
contact" + } + ], + "images": [ + "images/task_0049_user_0016_scene_0009_cfg_0002_321_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0049", + "split": "eval", + "split_group_id": "task_0049_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0049_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. transfer\nC. hold and carry\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. approach" + } + ], + "images": [ + "images/task_0049_user_0016_scene_0009_cfg_0002_372_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0049", + "split": "eval", + "split_group_id": "task_0049_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0049_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. pre-approach\nC. hold and carry\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
contact" + } + ], + "images": [ + "images/task_0049_user_0016_scene_0009_cfg_0002_377_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0049", + "split": "eval", + "split_group_id": "task_0049_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0049_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. transfer\nC. hold and carry\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. hold and carry" + } + ], + "images": [ + "images/task_0049_user_0016_scene_0009_cfg_0002_392_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0049", + "split": "eval", + "split_group_id": "task_0049_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0049_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. pre-approach\nC. transfer\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
release" + } + ], + "images": [ + "images/task_0049_user_0016_scene_0009_cfg_0002_450_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0049", + "split": "eval", + "split_group_id": "task_0049_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0049_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. contact\nC. release\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. transfer" + } + ], + "images": [ + "images/task_0049_user_0016_scene_0009_cfg_0002_460_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0049", + "split": "eval", + "split_group_id": "task_0049_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0049_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/task_0049_user_0016_scene_0009_cfg_0002_20_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0049", + "split": "eval", + "split_group_id": "task_0049_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0049_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. 
Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/task_0049_user_0016_scene_0009_cfg_0002_38_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0049", + "split": "eval", + "split_group_id": "task_0049_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0049_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_0049_user_0016_scene_0009_cfg_0002_56_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0049", + "split": "eval", + "split_group_id": "task_0049_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0049_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — no contact" + } + ], + "images": [ + "images/task_0049_user_0016_scene_0009_cfg_0002_80_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0049", + "split": "eval", + "split_group_id": "task_0049_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0049_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_0049_user_0016_scene_0009_cfg_0002_152_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0049", + "split": "eval", + "split_group_id": "task_0049_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0049_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — contact established" + } + ], + "images": [ + "images/task_0049_user_0016_scene_0009_cfg_0002_182_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0049", + "split": "eval", + "split_group_id": "task_0049_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0049_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/task_0049_user_0016_scene_0009_cfg_0002_200_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0049", + "split": "eval", + "split_group_id": "task_0049_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0049_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — contact established" + } + ], + "images": [ + "images/task_0049_user_0016_scene_0009_cfg_0002_253_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0049", + "split": "eval", + "split_group_id": "task_0049_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0049_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/task_0049_user_0016_scene_0009_cfg_0002_321_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0049", + "split": "eval", + "split_group_id": "task_0049_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0049_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — no contact" + } + ], + "images": [ + "images/task_0049_user_0016_scene_0009_cfg_0002_372_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0049", + "split": "eval", + "split_group_id": "task_0049_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0049_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/task_0049_user_0016_scene_0009_cfg_0002_377_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0049", + "split": "eval", + "split_group_id": "task_0049_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0049_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Up\nC. Left\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Down" + } + ], + "images": [ + "images/task_0049_user_0016_scene_0009_cfg_0002_70_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_80_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_90_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_100_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0049", + "split": "eval", + "split_group_id": "task_0049_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0049_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Down" + } + ], + "images": [ + "images/task_0049_user_0016_scene_0009_cfg_0002_90_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_100_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_110_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_120_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0049", + "split": "eval", + "split_group_id": "task_0049_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0049_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Up\nC. Right\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Down" + } + ], + "images": [ + "images/task_0049_user_0016_scene_0009_cfg_0002_100_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_110_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_120_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_130_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0049", + "split": "eval", + "split_group_id": "task_0049_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0049_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Up\nC. Left\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Right" + } + ], + "images": [ + "images/task_0049_user_0016_scene_0009_cfg_0002_170_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_180_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_190_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_200_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0049", + "split": "eval", + "split_group_id": "task_0049_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0049_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Down\nC. Up\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Up" + } + ], + "images": [ + "images/task_0049_user_0016_scene_0009_cfg_0002_180_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_190_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_200_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_210_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0049", + "split": "eval", + "split_group_id": "task_0049_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0049_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Down\nC. Right\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Left" + } + ], + "images": [ + "images/task_0049_user_0016_scene_0009_cfg_0002_200_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_210_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_220_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_230_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0049", + "split": "eval", + "split_group_id": "task_0049_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0049_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Left" + } + ], + "images": [ + "images/task_0049_user_0016_scene_0009_cfg_0002_220_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_230_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_240_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_250_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0049", + "split": "eval", + "split_group_id": "task_0049_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0049_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Down\nC. Left\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Up" + } + ], + "images": [ + "images/task_0049_user_0016_scene_0009_cfg_0002_270_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_280_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_290_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_300_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0049", + "split": "eval", + "split_group_id": "task_0049_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0049_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Left\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Left" + } + ], + "images": [ + "images/task_0049_user_0016_scene_0009_cfg_0002_340_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_350_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_360_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_370_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0049", + "split": "eval", + "split_group_id": "task_0049_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0049_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Up" + } + ], + "images": [ + "images/task_0049_user_0016_scene_0009_cfg_0002_370_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_380_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_390_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_400_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0049", + "split": "eval", + "split_group_id": "task_0049_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0049_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0049_user_0016_scene_0009_cfg_0002_14_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_17_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_20_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_23_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_26_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0049", + "split": "eval", + "split_group_id": "task_0049_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0049_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0049_user_0016_scene_0009_cfg_0002_50_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_53_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_56_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_59_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_62_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0049", + "split": "eval", + "split_group_id": "task_0049_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0049_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0049_user_0016_scene_0009_cfg_0002_296_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_299_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_302_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_305_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_308_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0049", + "split": "eval", + "split_group_id": "task_0049_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0049_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0049_user_0016_scene_0009_cfg_0002_454_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_457_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_460_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_463_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_466_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0049", + "split": "eval", + "split_group_id": "task_0049_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0049_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0049_user_0016_scene_0009_cfg_0002_10_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_25_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_40_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_55_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0049", + "split": "eval", + "split_group_id": "task_0049_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0049_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. 
No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0049_user_0016_scene_0009_cfg_0002_40_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_55_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_70_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_85_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0049", + "split": "eval", + "split_group_id": "task_0049_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0049_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0049_user_0016_scene_0009_cfg_0002_70_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_85_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_100_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_115_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0049", + "split": "eval", + "split_group_id": "task_0049_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0049_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. 
Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0049_user_0016_scene_0009_cfg_0002_100_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_115_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_130_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_145_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0049", + "split": "eval", + "split_group_id": "task_0049_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0049_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0049_user_0016_scene_0009_cfg_0002_130_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_145_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_160_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_175_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0049", + "split": "eval", + "split_group_id": "task_0049_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0049_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. 
Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0049_user_0016_scene_0009_cfg_0002_160_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_175_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_190_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_205_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0049", + "split": "eval", + "split_group_id": "task_0049_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0049_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0049_user_0016_scene_0009_cfg_0002_225_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_131_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0049", + "split": "eval", + "split_group_id": "task_0049_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0049_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0049_user_0016_scene_0009_cfg_0002_384_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_439_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0049", + "split": "eval", + "split_group_id": "task_0049_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0049_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0049_user_0016_scene_0009_cfg_0002_309_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_320_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0049", + "split": "eval", + "split_group_id": "task_0049_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0049_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0049_user_0016_scene_0009_cfg_0002_320_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_384_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0049", + "split": "eval", + "split_group_id": "task_0049_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0049_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0049_user_0016_scene_0009_cfg_0002_225_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_309_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0049", + "split": "eval", + "split_group_id": "task_0049_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0049_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0049_user_0016_scene_0009_cfg_0002_135_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_138_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_141_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_144_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_147_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0049", + "split": "eval", + "split_group_id": "task_0049_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0049_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0049_user_0016_scene_0009_cfg_0002_172_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_175_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_178_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_181_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_184_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0049", + "split": "eval", + "split_group_id": "task_0049_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0049_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0049_user_0016_scene_0009_cfg_0002_209_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_212_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_215_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_218_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_221_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0049", + "split": "eval", + "split_group_id": "task_0049_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0049_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0049_user_0016_scene_0009_cfg_0002_304_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_307_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_310_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_313_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_316_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0049", + "split": "eval", + "split_group_id": "task_0049_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0049_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0049_user_0016_scene_0009_cfg_0002_308_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_311_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_314_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_317_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_320_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0049", + "split": "eval", + "split_group_id": "task_0049_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0049_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0049_user_0016_scene_0009_cfg_0002_313_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_316_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_319_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_322_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_325_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0049", + "split": "eval", + "split_group_id": "task_0049_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0049_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0049_user_0016_scene_0009_cfg_0002_384_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_387_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_390_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_393_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_396_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0049", + "split": "eval", + "split_group_id": "task_0049_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0049_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0049_user_0016_scene_0009_cfg_0002_406_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_409_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_412_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_415_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_418_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0049", + "split": "eval", + "split_group_id": "task_0049_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0049_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0049_user_0016_scene_0009_cfg_0002_427_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_430_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_433_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_436_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_439_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0049", + "split": "eval", + "split_group_id": "task_0049_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0049_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_0049_user_0016_scene_0009_cfg_0002_412_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_441_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_382_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0049", + "split": "eval", + "split_group_id": "task_0049_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0049_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_0049_user_0016_scene_0009_cfg_0002_228_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_128_036422060215.jpg", + "images/task_0049_user_0016_scene_0009_cfg_0002_178_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0049", + "split": "eval", + "split_group_id": "task_0049_user_0016_scene_0009_cfg_0002", + "recording_id": "task_0049_user_0016_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0050_user_0007_scene_0008_cfg_0002_5_036422060215.jpg", + "images/task_0050_user_0007_scene_0008_cfg_0002_12_036422060215.jpg", + "images/task_0050_user_0007_scene_0008_cfg_0002_19_036422060215.jpg", + "images/task_0050_user_0007_scene_0008_cfg_0002_26_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0050_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0050_user_0007_scene_0008_cfg_0002_12_036422060215.jpg", + "images/task_0050_user_0007_scene_0008_cfg_0002_19_036422060215.jpg", + "images/task_0050_user_0007_scene_0008_cfg_0002_26_036422060215.jpg", + "images/task_0050_user_0007_scene_0008_cfg_0002_33_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0050_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0050_user_0007_scene_0008_cfg_0002_19_036422060215.jpg", + "images/task_0050_user_0007_scene_0008_cfg_0002_26_036422060215.jpg", + "images/task_0050_user_0007_scene_0008_cfg_0002_33_036422060215.jpg", + "images/task_0050_user_0007_scene_0008_cfg_0002_40_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0050_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0050_user_0007_scene_0008_cfg_0002_26_036422060215.jpg", + "images/task_0050_user_0007_scene_0008_cfg_0002_33_036422060215.jpg", + "images/task_0050_user_0007_scene_0008_cfg_0002_40_036422060215.jpg", + "images/task_0050_user_0007_scene_0008_cfg_0002_47_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0050_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0050_user_0007_scene_0008_cfg_0002_33_036422060215.jpg", + "images/task_0050_user_0007_scene_0008_cfg_0002_40_036422060215.jpg", + "images/task_0050_user_0007_scene_0008_cfg_0002_47_036422060215.jpg", + "images/task_0050_user_0007_scene_0008_cfg_0002_54_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0050_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0050_user_0007_scene_0008_cfg_0002_47_036422060215.jpg", + "images/task_0050_user_0007_scene_0008_cfg_0002_54_036422060215.jpg", + "images/task_0050_user_0007_scene_0008_cfg_0002_61_036422060215.jpg", + "images/task_0050_user_0007_scene_0008_cfg_0002_68_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0050_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0050_user_0007_scene_0008_cfg_0002_54_036422060215.jpg", + "images/task_0050_user_0007_scene_0008_cfg_0002_61_036422060215.jpg", + "images/task_0050_user_0007_scene_0008_cfg_0002_68_036422060215.jpg", + "images/task_0050_user_0007_scene_0008_cfg_0002_75_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0050_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0050_user_0007_scene_0008_cfg_0002_61_036422060215.jpg", + "images/task_0050_user_0007_scene_0008_cfg_0002_68_036422060215.jpg", + "images/task_0050_user_0007_scene_0008_cfg_0002_75_036422060215.jpg", + "images/task_0050_user_0007_scene_0008_cfg_0002_82_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0050_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0050_user_0007_scene_0008_cfg_0002_68_036422060215.jpg", + "images/task_0050_user_0007_scene_0008_cfg_0002_75_036422060215.jpg", + "images/task_0050_user_0007_scene_0008_cfg_0002_82_036422060215.jpg", + "images/task_0050_user_0007_scene_0008_cfg_0002_89_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0050_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0050_user_0007_scene_0008_cfg_0002_75_036422060215.jpg", + "images/task_0050_user_0007_scene_0008_cfg_0002_82_036422060215.jpg", + "images/task_0050_user_0007_scene_0008_cfg_0002_89_036422060215.jpg", + "images/task_0050_user_0007_scene_0008_cfg_0002_96_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0050_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0050_user_0013_scene_0001_cfg_0002_29_036422060215.jpg", + "images/task_0050_user_0013_scene_0001_cfg_0002_59_036422060215.jpg", + "images/task_0050_user_0013_scene_0001_cfg_0002_89_036422060215.jpg", + "images/task_0050_user_0013_scene_0001_cfg_0002_119_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0013_scene_0001_cfg_0002", + "recording_id": "task_0050_user_0013_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0050_user_0013_scene_0001_cfg_0002_72_036422060215.jpg", + "images/task_0050_user_0013_scene_0001_cfg_0002_102_036422060215.jpg", + "images/task_0050_user_0013_scene_0001_cfg_0002_132_036422060215.jpg", + "images/task_0050_user_0013_scene_0001_cfg_0002_162_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0013_scene_0001_cfg_0002", + "recording_id": "task_0050_user_0013_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0050_user_0013_scene_0001_cfg_0002_115_036422060215.jpg", + "images/task_0050_user_0013_scene_0001_cfg_0002_145_036422060215.jpg", + "images/task_0050_user_0013_scene_0001_cfg_0002_175_036422060215.jpg", + "images/task_0050_user_0013_scene_0001_cfg_0002_205_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0013_scene_0001_cfg_0002", + "recording_id": "task_0050_user_0013_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0050_user_0013_scene_0001_cfg_0002_201_036422060215.jpg", + "images/task_0050_user_0013_scene_0001_cfg_0002_231_036422060215.jpg", + "images/task_0050_user_0013_scene_0001_cfg_0002_261_036422060215.jpg", + "images/task_0050_user_0013_scene_0001_cfg_0002_291_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0013_scene_0001_cfg_0002", + "recording_id": "task_0050_user_0013_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0050_user_0013_scene_0001_cfg_0002_244_036422060215.jpg", + "images/task_0050_user_0013_scene_0001_cfg_0002_274_036422060215.jpg", + "images/task_0050_user_0013_scene_0001_cfg_0002_304_036422060215.jpg", + "images/task_0050_user_0013_scene_0001_cfg_0002_334_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0013_scene_0001_cfg_0002", + "recording_id": "task_0050_user_0013_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0050_user_0013_scene_0001_cfg_0002_287_036422060215.jpg", + "images/task_0050_user_0013_scene_0001_cfg_0002_317_036422060215.jpg", + "images/task_0050_user_0013_scene_0001_cfg_0002_347_036422060215.jpg", + "images/task_0050_user_0013_scene_0001_cfg_0002_377_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0013_scene_0001_cfg_0002", + "recording_id": "task_0050_user_0013_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0050_user_0013_scene_0001_cfg_0002_330_036422060215.jpg", + "images/task_0050_user_0013_scene_0001_cfg_0002_360_036422060215.jpg", + "images/task_0050_user_0013_scene_0001_cfg_0002_390_036422060215.jpg", + "images/task_0050_user_0013_scene_0001_cfg_0002_420_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0013_scene_0001_cfg_0002", + "recording_id": "task_0050_user_0013_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0050_user_0013_scene_0001_cfg_0002_416_036422060215.jpg", + "images/task_0050_user_0013_scene_0001_cfg_0002_446_036422060215.jpg", + "images/task_0050_user_0013_scene_0001_cfg_0002_476_036422060215.jpg", + "images/task_0050_user_0013_scene_0001_cfg_0002_506_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0013_scene_0001_cfg_0002", + "recording_id": "task_0050_user_0013_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0050_user_0013_scene_0001_cfg_0002_459_036422060215.jpg", + "images/task_0050_user_0013_scene_0001_cfg_0002_489_036422060215.jpg", + "images/task_0050_user_0013_scene_0001_cfg_0002_519_036422060215.jpg", + "images/task_0050_user_0013_scene_0001_cfg_0002_549_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0013_scene_0001_cfg_0002", + "recording_id": "task_0050_user_0013_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0050_user_0013_scene_0001_cfg_0002_502_036422060215.jpg", + "images/task_0050_user_0013_scene_0001_cfg_0002_532_036422060215.jpg", + "images/task_0050_user_0013_scene_0001_cfg_0002_562_036422060215.jpg", + "images/task_0050_user_0013_scene_0001_cfg_0002_592_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0013_scene_0001_cfg_0002", + "recording_id": "task_0050_user_0013_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. approach\nC. release\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. pre-approach" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_118_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. approach\nC. transfer\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
pre-approach" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_136_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. approach\nC. contact\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. approach" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_159_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. transfer\nC. contact\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
transfer" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_159_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. release\nC. pre-approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. contact" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_172_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. pre-approach\nC. hold and carry\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
hold and carry" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_192_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. contact\nC. hold and carry\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. transfer" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_287_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. transfer\nC. release\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
release" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_302_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. approach\nC. contact\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. approach" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_360_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. contact\nC. hold and carry\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
approach" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_378_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. pre-approach\nC. release\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. hold and carry" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_416_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. approach\nC. hold and carry\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
contact" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_606_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. approach\nC. release\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. release" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_644_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. contact\nC. release\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
hold and carry" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_645_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. hold and carry\nC. release\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. contact" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_764_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. approach\nC. release\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
transfer" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_764_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. approach\nC. hold and carry\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. release" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_780_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_118_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. 
Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_136_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_159_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — contact established" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_172_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_192_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — no contact" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_360_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_378_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — contact established" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_416_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_606_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — contact established" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_645_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_764_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Right" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_50_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_60_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_70_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_80_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Up" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_70_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_80_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_90_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_100_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Right" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_140_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_150_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_160_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_170_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Up\nC. Right\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Left" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_200_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_210_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_220_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_230_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Right\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Left" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_210_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_220_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_230_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_240_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Left\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Left" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_220_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_230_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_240_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_250_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Left\nC. Up\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Down" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_400_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_410_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_420_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_430_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Left\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Down" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_410_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_420_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_430_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_440_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Left\nC. Down\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Up" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_490_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_500_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_510_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_520_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Up\nC. Left\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Up" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_500_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_510_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_520_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_530_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Left\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Right" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_600_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_610_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_620_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_630_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_130_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_133_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_136_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_139_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_142_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. 
Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_153_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_156_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_159_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_162_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_165_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_639_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_642_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_645_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_648_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_651_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_758_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_761_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_764_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_767_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_770_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_16_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_41_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_66_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_91_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. 
Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_66_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_91_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_116_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_141_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_116_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_141_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_166_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_191_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. 
Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_166_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_191_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_216_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_241_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_216_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_241_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_266_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_291_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. 
Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_266_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_291_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_316_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_341_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_619_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_428_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_199_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_272_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_772_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_782_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_619_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_641_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_641_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_772_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_428_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_384_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_272_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_384_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_200_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_203_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_206_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_209_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_212_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_230_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_233_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_236_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_239_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_242_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_259_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_262_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_265_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_268_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_271_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_382_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_385_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_388_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_391_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_394_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_400_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_403_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_406_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_409_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_412_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_418_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_421_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_424_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_427_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_430_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_615_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_618_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_621_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_624_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_627_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_624_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_627_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_630_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_633_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_636_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_633_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_636_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_639_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_642_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_645_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_766_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_769_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_772_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_775_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_778_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_771_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_774_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_777_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_780_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_783_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_776_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_779_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_782_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_785_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_788_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_618_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_630_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_642_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_382_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_406_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_430_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_0050_user_0016_scene_0007_cfg_0002_275_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_196_036422060215.jpg", + "images/task_0050_user_0016_scene_0007_cfg_0002_236_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0050", + "split": "eval", + "split_group_id": "task_0050_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0050_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0051_user_0007_scene_0004_cfg_0002_5_036422060215.jpg", + "images/task_0051_user_0007_scene_0004_cfg_0002_13_036422060215.jpg", + "images/task_0051_user_0007_scene_0004_cfg_0002_21_036422060215.jpg", + "images/task_0051_user_0007_scene_0004_cfg_0002_29_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0051", + "split": "eval", + "split_group_id": "task_0051_user_0007_scene_0004_cfg_0002", + "recording_id": "task_0051_user_0007_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0051_user_0007_scene_0004_cfg_0002_13_036422060215.jpg", + "images/task_0051_user_0007_scene_0004_cfg_0002_21_036422060215.jpg", + "images/task_0051_user_0007_scene_0004_cfg_0002_29_036422060215.jpg", + "images/task_0051_user_0007_scene_0004_cfg_0002_37_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0051", + "split": "eval", + "split_group_id": "task_0051_user_0007_scene_0004_cfg_0002", + "recording_id": "task_0051_user_0007_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0051_user_0007_scene_0004_cfg_0002_21_036422060215.jpg", + "images/task_0051_user_0007_scene_0004_cfg_0002_29_036422060215.jpg", + "images/task_0051_user_0007_scene_0004_cfg_0002_37_036422060215.jpg", + "images/task_0051_user_0007_scene_0004_cfg_0002_45_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0051", + "split": "eval", + "split_group_id": "task_0051_user_0007_scene_0004_cfg_0002", + "recording_id": "task_0051_user_0007_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0051_user_0007_scene_0004_cfg_0002_29_036422060215.jpg", + "images/task_0051_user_0007_scene_0004_cfg_0002_37_036422060215.jpg", + "images/task_0051_user_0007_scene_0004_cfg_0002_45_036422060215.jpg", + "images/task_0051_user_0007_scene_0004_cfg_0002_53_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0051", + "split": "eval", + "split_group_id": "task_0051_user_0007_scene_0004_cfg_0002", + "recording_id": "task_0051_user_0007_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0051_user_0007_scene_0004_cfg_0002_37_036422060215.jpg", + "images/task_0051_user_0007_scene_0004_cfg_0002_45_036422060215.jpg", + "images/task_0051_user_0007_scene_0004_cfg_0002_53_036422060215.jpg", + "images/task_0051_user_0007_scene_0004_cfg_0002_61_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0051", + "split": "eval", + "split_group_id": "task_0051_user_0007_scene_0004_cfg_0002", + "recording_id": "task_0051_user_0007_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0051_user_0007_scene_0004_cfg_0002_45_036422060215.jpg", + "images/task_0051_user_0007_scene_0004_cfg_0002_53_036422060215.jpg", + "images/task_0051_user_0007_scene_0004_cfg_0002_61_036422060215.jpg", + "images/task_0051_user_0007_scene_0004_cfg_0002_69_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0051", + "split": "eval", + "split_group_id": "task_0051_user_0007_scene_0004_cfg_0002", + "recording_id": "task_0051_user_0007_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0051_user_0007_scene_0004_cfg_0002_53_036422060215.jpg", + "images/task_0051_user_0007_scene_0004_cfg_0002_61_036422060215.jpg", + "images/task_0051_user_0007_scene_0004_cfg_0002_69_036422060215.jpg", + "images/task_0051_user_0007_scene_0004_cfg_0002_77_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0051", + "split": "eval", + "split_group_id": "task_0051_user_0007_scene_0004_cfg_0002", + "recording_id": "task_0051_user_0007_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0051_user_0007_scene_0004_cfg_0002_61_036422060215.jpg", + "images/task_0051_user_0007_scene_0004_cfg_0002_69_036422060215.jpg", + "images/task_0051_user_0007_scene_0004_cfg_0002_77_036422060215.jpg", + "images/task_0051_user_0007_scene_0004_cfg_0002_85_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0051", + "split": "eval", + "split_group_id": "task_0051_user_0007_scene_0004_cfg_0002", + "recording_id": "task_0051_user_0007_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0051_user_0007_scene_0004_cfg_0002_69_036422060215.jpg", + "images/task_0051_user_0007_scene_0004_cfg_0002_77_036422060215.jpg", + "images/task_0051_user_0007_scene_0004_cfg_0002_85_036422060215.jpg", + "images/task_0051_user_0007_scene_0004_cfg_0002_93_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0051", + "split": "eval", + "split_group_id": "task_0051_user_0007_scene_0004_cfg_0002", + "recording_id": "task_0051_user_0007_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0051_user_0007_scene_0004_cfg_0002_77_036422060215.jpg", + "images/task_0051_user_0007_scene_0004_cfg_0002_85_036422060215.jpg", + "images/task_0051_user_0007_scene_0004_cfg_0002_93_036422060215.jpg", + "images/task_0051_user_0007_scene_0004_cfg_0002_101_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0051", + "split": "eval", + "split_group_id": "task_0051_user_0007_scene_0004_cfg_0002", + "recording_id": "task_0051_user_0007_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "A. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0051_user_0007_scene_0010_cfg_0002_6_036422060215.jpg", + "images/task_0051_user_0007_scene_0010_cfg_0002_9_036422060215.jpg", + "images/task_0051_user_0007_scene_0010_cfg_0002_12_036422060215.jpg", + "images/task_0051_user_0007_scene_0010_cfg_0002_15_036422060215.jpg", + "images/task_0051_user_0007_scene_0010_cfg_0002_18_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0051", + "split": "eval", + "split_group_id": "task_0051_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0051_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0051_user_0007_scene_0010_cfg_0002_24_036422060215.jpg", + "images/task_0051_user_0007_scene_0010_cfg_0002_27_036422060215.jpg", + "images/task_0051_user_0007_scene_0010_cfg_0002_30_036422060215.jpg", + "images/task_0051_user_0007_scene_0010_cfg_0002_33_036422060215.jpg", + "images/task_0051_user_0007_scene_0010_cfg_0002_36_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0051", + "split": "eval", + "split_group_id": "task_0051_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0051_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0051_user_0007_scene_0010_cfg_0002_60_036422060215.jpg", + "images/task_0051_user_0007_scene_0010_cfg_0002_63_036422060215.jpg", + "images/task_0051_user_0007_scene_0010_cfg_0002_66_036422060215.jpg", + "images/task_0051_user_0007_scene_0010_cfg_0002_69_036422060215.jpg", + "images/task_0051_user_0007_scene_0010_cfg_0002_72_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0051", + "split": "eval", + "split_group_id": "task_0051_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0051_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0051_user_0007_scene_0010_cfg_0002_213_036422060215.jpg", + "images/task_0051_user_0007_scene_0010_cfg_0002_216_036422060215.jpg", + "images/task_0051_user_0007_scene_0010_cfg_0002_219_036422060215.jpg", + "images/task_0051_user_0007_scene_0010_cfg_0002_222_036422060215.jpg", + "images/task_0051_user_0007_scene_0010_cfg_0002_225_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0051", + "split": "eval", + "split_group_id": "task_0051_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0051_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0051_user_0007_scene_0010_cfg_0002_231_036422060215.jpg", + "images/task_0051_user_0007_scene_0010_cfg_0002_234_036422060215.jpg", + "images/task_0051_user_0007_scene_0010_cfg_0002_237_036422060215.jpg", + "images/task_0051_user_0007_scene_0010_cfg_0002_240_036422060215.jpg", + "images/task_0051_user_0007_scene_0010_cfg_0002_243_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0051", + "split": "eval", + "split_group_id": "task_0051_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0051_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0051_user_0007_scene_0010_cfg_0002_354_036422060215.jpg", + "images/task_0051_user_0007_scene_0010_cfg_0002_357_036422060215.jpg", + "images/task_0051_user_0007_scene_0010_cfg_0002_360_036422060215.jpg", + "images/task_0051_user_0007_scene_0010_cfg_0002_363_036422060215.jpg", + "images/task_0051_user_0007_scene_0010_cfg_0002_366_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0051", + "split": "eval", + "split_group_id": "task_0051_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0051_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0051_user_0007_scene_0010_cfg_0002_9_036422060215.jpg", + "images/task_0051_user_0007_scene_0010_cfg_0002_22_036422060215.jpg", + "images/task_0051_user_0007_scene_0010_cfg_0002_35_036422060215.jpg", + "images/task_0051_user_0007_scene_0010_cfg_0002_48_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0051", + "split": "eval", + "split_group_id": "task_0051_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0051_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. 
Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0051_user_0007_scene_0010_cfg_0002_35_036422060215.jpg", + "images/task_0051_user_0007_scene_0010_cfg_0002_48_036422060215.jpg", + "images/task_0051_user_0007_scene_0010_cfg_0002_61_036422060215.jpg", + "images/task_0051_user_0007_scene_0010_cfg_0002_74_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0051", + "split": "eval", + "split_group_id": "task_0051_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0051_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0051_user_0007_scene_0010_cfg_0002_61_036422060215.jpg", + "images/task_0051_user_0007_scene_0010_cfg_0002_74_036422060215.jpg", + "images/task_0051_user_0007_scene_0010_cfg_0002_87_036422060215.jpg", + "images/task_0051_user_0007_scene_0010_cfg_0002_100_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0051", + "split": "eval", + "split_group_id": "task_0051_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0051_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. 
No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0051_user_0007_scene_0010_cfg_0002_87_036422060215.jpg", + "images/task_0051_user_0007_scene_0010_cfg_0002_100_036422060215.jpg", + "images/task_0051_user_0007_scene_0010_cfg_0002_113_036422060215.jpg", + "images/task_0051_user_0007_scene_0010_cfg_0002_126_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0051", + "split": "eval", + "split_group_id": "task_0051_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0051_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0051_user_0007_scene_0010_cfg_0002_113_036422060215.jpg", + "images/task_0051_user_0007_scene_0010_cfg_0002_126_036422060215.jpg", + "images/task_0051_user_0007_scene_0010_cfg_0002_139_036422060215.jpg", + "images/task_0051_user_0007_scene_0010_cfg_0002_152_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0051", + "split": "eval", + "split_group_id": "task_0051_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0051_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. 
No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0051_user_0007_scene_0010_cfg_0002_139_036422060215.jpg", + "images/task_0051_user_0007_scene_0010_cfg_0002_152_036422060215.jpg", + "images/task_0051_user_0007_scene_0010_cfg_0002_165_036422060215.jpg", + "images/task_0051_user_0007_scene_0010_cfg_0002_178_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0051", + "split": "eval", + "split_group_id": "task_0051_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0051_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_0051_user_0007_scene_0010_cfg_0002_171_036422060215.jpg", + "images/task_0051_user_0007_scene_0010_cfg_0002_163_036422060215.jpg", + "images/task_0051_user_0007_scene_0010_cfg_0002_179_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0051", + "split": "eval", + "split_group_id": "task_0051_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0051_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_0051_user_0007_scene_0010_cfg_0002_292_036422060215.jpg", + "images/task_0051_user_0007_scene_0010_cfg_0002_250_036422060215.jpg", + "images/task_0051_user_0007_scene_0010_cfg_0002_333_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0051", + "split": "eval", + "split_group_id": "task_0051_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0051_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0051_user_0013_scene_0001_cfg_0002_145_036422060215.jpg", + "images/task_0051_user_0013_scene_0001_cfg_0002_148_036422060215.jpg", + "images/task_0051_user_0013_scene_0001_cfg_0002_151_036422060215.jpg", + "images/task_0051_user_0013_scene_0001_cfg_0002_154_036422060215.jpg", + "images/task_0051_user_0013_scene_0001_cfg_0002_157_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0051", + "split": "eval", + "split_group_id": "task_0051_user_0013_scene_0001_cfg_0002", + "recording_id": "task_0051_user_0013_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. 
Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0051_user_0013_scene_0001_cfg_0002_234_036422060215.jpg", + "images/task_0051_user_0013_scene_0001_cfg_0002_237_036422060215.jpg", + "images/task_0051_user_0013_scene_0001_cfg_0002_240_036422060215.jpg", + "images/task_0051_user_0013_scene_0001_cfg_0002_243_036422060215.jpg", + "images/task_0051_user_0013_scene_0001_cfg_0002_246_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0051", + "split": "eval", + "split_group_id": "task_0051_user_0013_scene_0001_cfg_0002", + "recording_id": "task_0051_user_0013_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0051_user_0013_scene_0001_cfg_0002_257_036422060215.jpg", + "images/task_0051_user_0013_scene_0001_cfg_0002_260_036422060215.jpg", + "images/task_0051_user_0013_scene_0001_cfg_0002_263_036422060215.jpg", + "images/task_0051_user_0013_scene_0001_cfg_0002_266_036422060215.jpg", + "images/task_0051_user_0013_scene_0001_cfg_0002_269_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0051", + "split": "eval", + "split_group_id": "task_0051_user_0013_scene_0001_cfg_0002", + "recording_id": "task_0051_user_0013_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0051_user_0013_scene_0001_cfg_0002_275_036422060215.jpg", + "images/task_0051_user_0013_scene_0001_cfg_0002_278_036422060215.jpg", + "images/task_0051_user_0013_scene_0001_cfg_0002_281_036422060215.jpg", + "images/task_0051_user_0013_scene_0001_cfg_0002_284_036422060215.jpg", + "images/task_0051_user_0013_scene_0001_cfg_0002_287_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0051", + "split": "eval", + "split_group_id": "task_0051_user_0013_scene_0001_cfg_0002", + "recording_id": "task_0051_user_0013_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0051_user_0013_scene_0001_cfg_0002_7_036422060215.jpg", + "images/task_0051_user_0013_scene_0001_cfg_0002_17_036422060215.jpg", + "images/task_0051_user_0013_scene_0001_cfg_0002_27_036422060215.jpg", + "images/task_0051_user_0013_scene_0001_cfg_0002_37_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0051", + "split": "eval", + "split_group_id": "task_0051_user_0013_scene_0001_cfg_0002", + "recording_id": "task_0051_user_0013_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. 
Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0051_user_0013_scene_0001_cfg_0002_27_036422060215.jpg", + "images/task_0051_user_0013_scene_0001_cfg_0002_37_036422060215.jpg", + "images/task_0051_user_0013_scene_0001_cfg_0002_47_036422060215.jpg", + "images/task_0051_user_0013_scene_0001_cfg_0002_57_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0051", + "split": "eval", + "split_group_id": "task_0051_user_0013_scene_0001_cfg_0002", + "recording_id": "task_0051_user_0013_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0051_user_0013_scene_0001_cfg_0002_47_036422060215.jpg", + "images/task_0051_user_0013_scene_0001_cfg_0002_57_036422060215.jpg", + "images/task_0051_user_0013_scene_0001_cfg_0002_67_036422060215.jpg", + "images/task_0051_user_0013_scene_0001_cfg_0002_77_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0051", + "split": "eval", + "split_group_id": "task_0051_user_0013_scene_0001_cfg_0002", + "recording_id": "task_0051_user_0013_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. 
Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0051_user_0013_scene_0001_cfg_0002_77_036422060215.jpg", + "images/task_0051_user_0013_scene_0001_cfg_0002_87_036422060215.jpg", + "images/task_0051_user_0013_scene_0001_cfg_0002_97_036422060215.jpg", + "images/task_0051_user_0013_scene_0001_cfg_0002_107_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0051", + "split": "eval", + "split_group_id": "task_0051_user_0013_scene_0001_cfg_0002", + "recording_id": "task_0051_user_0013_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0051_user_0013_scene_0001_cfg_0002_97_036422060215.jpg", + "images/task_0051_user_0013_scene_0001_cfg_0002_107_036422060215.jpg", + "images/task_0051_user_0013_scene_0001_cfg_0002_117_036422060215.jpg", + "images/task_0051_user_0013_scene_0001_cfg_0002_127_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0051", + "split": "eval", + "split_group_id": "task_0051_user_0013_scene_0001_cfg_0002", + "recording_id": "task_0051_user_0013_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. 
No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0051_user_0013_scene_0001_cfg_0002_117_036422060215.jpg", + "images/task_0051_user_0013_scene_0001_cfg_0002_127_036422060215.jpg", + "images/task_0051_user_0013_scene_0001_cfg_0002_137_036422060215.jpg", + "images/task_0051_user_0013_scene_0001_cfg_0002_147_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0051", + "split": "eval", + "split_group_id": "task_0051_user_0013_scene_0001_cfg_0002", + "recording_id": "task_0051_user_0013_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_0051_user_0013_scene_0001_cfg_0002_234_036422060215.jpg", + "images/task_0051_user_0013_scene_0001_cfg_0002_282_036422060215.jpg", + "images/task_0051_user_0013_scene_0001_cfg_0002_258_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0051", + "split": "eval", + "split_group_id": "task_0051_user_0013_scene_0001_cfg_0002", + "recording_id": "task_0051_user_0013_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0051_user_0016_scene_0003_cfg_0002_34_036422060215.jpg", + "images/task_0051_user_0016_scene_0003_cfg_0002_37_036422060215.jpg", + "images/task_0051_user_0016_scene_0003_cfg_0002_40_036422060215.jpg", + "images/task_0051_user_0016_scene_0003_cfg_0002_43_036422060215.jpg", + "images/task_0051_user_0016_scene_0003_cfg_0002_46_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0051", + "split": "eval", + "split_group_id": "task_0051_user_0016_scene_0003_cfg_0002", + "recording_id": "task_0051_user_0016_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0051_user_0016_scene_0003_cfg_0002_94_036422060215.jpg", + "images/task_0051_user_0016_scene_0003_cfg_0002_97_036422060215.jpg", + "images/task_0051_user_0016_scene_0003_cfg_0002_100_036422060215.jpg", + "images/task_0051_user_0016_scene_0003_cfg_0002_103_036422060215.jpg", + "images/task_0051_user_0016_scene_0003_cfg_0002_106_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0051", + "split": "eval", + "split_group_id": "task_0051_user_0016_scene_0003_cfg_0002", + "recording_id": "task_0051_user_0016_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0051_user_0016_scene_0003_cfg_0002_112_036422060215.jpg", + "images/task_0051_user_0016_scene_0003_cfg_0002_115_036422060215.jpg", + "images/task_0051_user_0016_scene_0003_cfg_0002_118_036422060215.jpg", + "images/task_0051_user_0016_scene_0003_cfg_0002_121_036422060215.jpg", + "images/task_0051_user_0016_scene_0003_cfg_0002_124_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0051", + "split": "eval", + "split_group_id": "task_0051_user_0016_scene_0003_cfg_0002", + "recording_id": "task_0051_user_0016_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0051_user_0016_scene_0003_cfg_0002_174_036422060215.jpg", + "images/task_0051_user_0016_scene_0003_cfg_0002_177_036422060215.jpg", + "images/task_0051_user_0016_scene_0003_cfg_0002_180_036422060215.jpg", + "images/task_0051_user_0016_scene_0003_cfg_0002_183_036422060215.jpg", + "images/task_0051_user_0016_scene_0003_cfg_0002_186_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0051", + "split": "eval", + "split_group_id": "task_0051_user_0016_scene_0003_cfg_0002", + "recording_id": "task_0051_user_0016_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0051_user_0016_scene_0003_cfg_0002_4_036422060215.jpg", + "images/task_0051_user_0016_scene_0003_cfg_0002_11_036422060215.jpg", + "images/task_0051_user_0016_scene_0003_cfg_0002_18_036422060215.jpg", + "images/task_0051_user_0016_scene_0003_cfg_0002_25_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0051", + "split": "eval", + "split_group_id": "task_0051_user_0016_scene_0003_cfg_0002", + "recording_id": "task_0051_user_0016_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. 
Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0051_user_0016_scene_0003_cfg_0002_18_036422060215.jpg", + "images/task_0051_user_0016_scene_0003_cfg_0002_25_036422060215.jpg", + "images/task_0051_user_0016_scene_0003_cfg_0002_32_036422060215.jpg", + "images/task_0051_user_0016_scene_0003_cfg_0002_39_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0051", + "split": "eval", + "split_group_id": "task_0051_user_0016_scene_0003_cfg_0002", + "recording_id": "task_0051_user_0016_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0051_user_0016_scene_0003_cfg_0002_32_036422060215.jpg", + "images/task_0051_user_0016_scene_0003_cfg_0002_39_036422060215.jpg", + "images/task_0051_user_0016_scene_0003_cfg_0002_46_036422060215.jpg", + "images/task_0051_user_0016_scene_0003_cfg_0002_53_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0051", + "split": "eval", + "split_group_id": "task_0051_user_0016_scene_0003_cfg_0002", + "recording_id": "task_0051_user_0016_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. 
No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0051_user_0016_scene_0003_cfg_0002_39_036422060215.jpg", + "images/task_0051_user_0016_scene_0003_cfg_0002_46_036422060215.jpg", + "images/task_0051_user_0016_scene_0003_cfg_0002_53_036422060215.jpg", + "images/task_0051_user_0016_scene_0003_cfg_0002_60_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0051", + "split": "eval", + "split_group_id": "task_0051_user_0016_scene_0003_cfg_0002", + "recording_id": "task_0051_user_0016_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0051_user_0016_scene_0003_cfg_0002_53_036422060215.jpg", + "images/task_0051_user_0016_scene_0003_cfg_0002_60_036422060215.jpg", + "images/task_0051_user_0016_scene_0003_cfg_0002_67_036422060215.jpg", + "images/task_0051_user_0016_scene_0003_cfg_0002_74_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0051", + "split": "eval", + "split_group_id": "task_0051_user_0016_scene_0003_cfg_0002", + "recording_id": "task_0051_user_0016_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. 
No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0051_user_0016_scene_0003_cfg_0002_67_036422060215.jpg", + "images/task_0051_user_0016_scene_0003_cfg_0002_74_036422060215.jpg", + "images/task_0051_user_0016_scene_0003_cfg_0002_81_036422060215.jpg", + "images/task_0051_user_0016_scene_0003_cfg_0002_88_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0051", + "split": "eval", + "split_group_id": "task_0051_user_0016_scene_0003_cfg_0002", + "recording_id": "task_0051_user_0016_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_0051_user_0016_scene_0003_cfg_0002_128_036422060215.jpg", + "images/task_0051_user_0016_scene_0003_cfg_0002_118_036422060215.jpg", + "images/task_0051_user_0016_scene_0003_cfg_0002_109_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0051", + "split": "eval", + "split_group_id": "task_0051_user_0016_scene_0003_cfg_0002", + "recording_id": "task_0051_user_0016_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0052_user_0016_scene_0007_cfg_0002_249_036422060215.jpg", + "images/task_0052_user_0016_scene_0007_cfg_0002_252_036422060215.jpg", + "images/task_0052_user_0016_scene_0007_cfg_0002_255_036422060215.jpg", + "images/task_0052_user_0016_scene_0007_cfg_0002_258_036422060215.jpg", + "images/task_0052_user_0016_scene_0007_cfg_0002_261_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0052", + "split": "eval", + "split_group_id": "task_0052_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0052_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0052_user_0016_scene_0007_cfg_0002_295_036422060215.jpg", + "images/task_0052_user_0016_scene_0007_cfg_0002_298_036422060215.jpg", + "images/task_0052_user_0016_scene_0007_cfg_0002_301_036422060215.jpg", + "images/task_0052_user_0016_scene_0007_cfg_0002_304_036422060215.jpg", + "images/task_0052_user_0016_scene_0007_cfg_0002_307_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0052", + "split": "eval", + "split_group_id": "task_0052_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0052_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0052_user_0016_scene_0007_cfg_0002_432_036422060215.jpg", + "images/task_0052_user_0016_scene_0007_cfg_0002_435_036422060215.jpg", + "images/task_0052_user_0016_scene_0007_cfg_0002_438_036422060215.jpg", + "images/task_0052_user_0016_scene_0007_cfg_0002_441_036422060215.jpg", + "images/task_0052_user_0016_scene_0007_cfg_0002_444_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0052", + "split": "eval", + "split_group_id": "task_0052_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0052_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0052_user_0016_scene_0007_cfg_0002_11_036422060215.jpg", + "images/task_0052_user_0016_scene_0007_cfg_0002_27_036422060215.jpg", + "images/task_0052_user_0016_scene_0007_cfg_0002_43_036422060215.jpg", + "images/task_0052_user_0016_scene_0007_cfg_0002_59_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0052", + "split": "eval", + "split_group_id": "task_0052_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0052_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. 
No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0052_user_0016_scene_0007_cfg_0002_43_036422060215.jpg", + "images/task_0052_user_0016_scene_0007_cfg_0002_59_036422060215.jpg", + "images/task_0052_user_0016_scene_0007_cfg_0002_75_036422060215.jpg", + "images/task_0052_user_0016_scene_0007_cfg_0002_91_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0052", + "split": "eval", + "split_group_id": "task_0052_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0052_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0052_user_0016_scene_0007_cfg_0002_107_036422060215.jpg", + "images/task_0052_user_0016_scene_0007_cfg_0002_123_036422060215.jpg", + "images/task_0052_user_0016_scene_0007_cfg_0002_139_036422060215.jpg", + "images/task_0052_user_0016_scene_0007_cfg_0002_155_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0052", + "split": "eval", + "split_group_id": "task_0052_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0052_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. 
Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0052_user_0016_scene_0007_cfg_0002_139_036422060215.jpg", + "images/task_0052_user_0016_scene_0007_cfg_0002_155_036422060215.jpg", + "images/task_0052_user_0016_scene_0007_cfg_0002_171_036422060215.jpg", + "images/task_0052_user_0016_scene_0007_cfg_0002_187_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0052", + "split": "eval", + "split_group_id": "task_0052_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0052_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0052_user_0016_scene_0007_cfg_0002_171_036422060215.jpg", + "images/task_0052_user_0016_scene_0007_cfg_0002_187_036422060215.jpg", + "images/task_0052_user_0016_scene_0007_cfg_0002_203_036422060215.jpg", + "images/task_0052_user_0016_scene_0007_cfg_0002_219_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0052", + "split": "eval", + "split_group_id": "task_0052_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0052_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_0052_user_0016_scene_0007_cfg_0002_300_036422060215.jpg", + "images/task_0052_user_0016_scene_0007_cfg_0002_326_036422060215.jpg", + "images/task_0052_user_0016_scene_0007_cfg_0002_353_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0052", + "split": "eval", + "split_group_id": "task_0052_user_0016_scene_0007_cfg_0002", + "recording_id": "task_0052_user_0016_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0054_user_0007_scene_0001_cfg_0002_83_036422060215.jpg", + "images/task_0054_user_0007_scene_0001_cfg_0002_86_036422060215.jpg", + "images/task_0054_user_0007_scene_0001_cfg_0002_89_036422060215.jpg", + "images/task_0054_user_0007_scene_0001_cfg_0002_92_036422060215.jpg", + "images/task_0054_user_0007_scene_0001_cfg_0002_95_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0054", + "split": "eval", + "split_group_id": "task_0054_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0054_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. 
Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0054_user_0007_scene_0001_cfg_0002_119_036422060215.jpg", + "images/task_0054_user_0007_scene_0001_cfg_0002_122_036422060215.jpg", + "images/task_0054_user_0007_scene_0001_cfg_0002_125_036422060215.jpg", + "images/task_0054_user_0007_scene_0001_cfg_0002_128_036422060215.jpg", + "images/task_0054_user_0007_scene_0001_cfg_0002_131_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0054", + "split": "eval", + "split_group_id": "task_0054_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0054_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0054_user_0007_scene_0001_cfg_0002_143_036422060215.jpg", + "images/task_0054_user_0007_scene_0001_cfg_0002_146_036422060215.jpg", + "images/task_0054_user_0007_scene_0001_cfg_0002_149_036422060215.jpg", + "images/task_0054_user_0007_scene_0001_cfg_0002_152_036422060215.jpg", + "images/task_0054_user_0007_scene_0001_cfg_0002_155_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0054", + "split": "eval", + "split_group_id": "task_0054_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0054_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0054_user_0007_scene_0001_cfg_0002_225_036422060215.jpg", + "images/task_0054_user_0007_scene_0001_cfg_0002_228_036422060215.jpg", + "images/task_0054_user_0007_scene_0001_cfg_0002_231_036422060215.jpg", + "images/task_0054_user_0007_scene_0001_cfg_0002_234_036422060215.jpg", + "images/task_0054_user_0007_scene_0001_cfg_0002_237_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0054", + "split": "eval", + "split_group_id": "task_0054_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0054_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0054_user_0007_scene_0001_cfg_0002_7_036422060215.jpg", + "images/task_0054_user_0007_scene_0001_cfg_0002_17_036422060215.jpg", + "images/task_0054_user_0007_scene_0001_cfg_0002_27_036422060215.jpg", + "images/task_0054_user_0007_scene_0001_cfg_0002_37_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0054", + "split": "eval", + "split_group_id": "task_0054_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0054_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. 
Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0054_user_0007_scene_0001_cfg_0002_27_036422060215.jpg", + "images/task_0054_user_0007_scene_0001_cfg_0002_37_036422060215.jpg", + "images/task_0054_user_0007_scene_0001_cfg_0002_47_036422060215.jpg", + "images/task_0054_user_0007_scene_0001_cfg_0002_57_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0054", + "split": "eval", + "split_group_id": "task_0054_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0054_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0054_user_0007_scene_0001_cfg_0002_47_036422060215.jpg", + "images/task_0054_user_0007_scene_0001_cfg_0002_57_036422060215.jpg", + "images/task_0054_user_0007_scene_0001_cfg_0002_67_036422060215.jpg", + "images/task_0054_user_0007_scene_0001_cfg_0002_77_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0054", + "split": "eval", + "split_group_id": "task_0054_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0054_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. 
No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0054_user_0007_scene_0001_cfg_0002_67_036422060215.jpg", + "images/task_0054_user_0007_scene_0001_cfg_0002_77_036422060215.jpg", + "images/task_0054_user_0007_scene_0001_cfg_0002_87_036422060215.jpg", + "images/task_0054_user_0007_scene_0001_cfg_0002_97_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0054", + "split": "eval", + "split_group_id": "task_0054_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0054_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0054_user_0007_scene_0001_cfg_0002_87_036422060215.jpg", + "images/task_0054_user_0007_scene_0001_cfg_0002_97_036422060215.jpg", + "images/task_0054_user_0007_scene_0001_cfg_0002_107_036422060215.jpg", + "images/task_0054_user_0007_scene_0001_cfg_0002_117_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0054", + "split": "eval", + "split_group_id": "task_0054_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0054_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. 
No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0054_user_0007_scene_0001_cfg_0002_107_036422060215.jpg", + "images/task_0054_user_0007_scene_0001_cfg_0002_117_036422060215.jpg", + "images/task_0054_user_0007_scene_0001_cfg_0002_127_036422060215.jpg", + "images/task_0054_user_0007_scene_0001_cfg_0002_137_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0054", + "split": "eval", + "split_group_id": "task_0054_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0054_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_0054_user_0007_scene_0001_cfg_0002_227_036422060215.jpg", + "images/task_0054_user_0007_scene_0001_cfg_0002_282_036422060215.jpg", + "images/task_0054_user_0007_scene_0001_cfg_0002_254_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0054", + "split": "eval", + "split_group_id": "task_0054_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0054_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0054_user_0016_scene_0004_cfg_0002_6_036422060215.jpg", + "images/task_0054_user_0016_scene_0004_cfg_0002_9_036422060215.jpg", + "images/task_0054_user_0016_scene_0004_cfg_0002_12_036422060215.jpg", + "images/task_0054_user_0016_scene_0004_cfg_0002_15_036422060215.jpg", + "images/task_0054_user_0016_scene_0004_cfg_0002_18_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0054", + "split": "eval", + "split_group_id": "task_0054_user_0016_scene_0004_cfg_0002", + "recording_id": "task_0054_user_0016_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0054_user_0016_scene_0004_cfg_0002_24_036422060215.jpg", + "images/task_0054_user_0016_scene_0004_cfg_0002_27_036422060215.jpg", + "images/task_0054_user_0016_scene_0004_cfg_0002_30_036422060215.jpg", + "images/task_0054_user_0016_scene_0004_cfg_0002_33_036422060215.jpg", + "images/task_0054_user_0016_scene_0004_cfg_0002_36_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0054", + "split": "eval", + "split_group_id": "task_0054_user_0016_scene_0004_cfg_0002", + "recording_id": "task_0054_user_0016_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0054_user_0016_scene_0004_cfg_0002_66_036422060215.jpg", + "images/task_0054_user_0016_scene_0004_cfg_0002_69_036422060215.jpg", + "images/task_0054_user_0016_scene_0004_cfg_0002_72_036422060215.jpg", + "images/task_0054_user_0016_scene_0004_cfg_0002_75_036422060215.jpg", + "images/task_0054_user_0016_scene_0004_cfg_0002_78_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0054", + "split": "eval", + "split_group_id": "task_0054_user_0016_scene_0004_cfg_0002", + "recording_id": "task_0054_user_0016_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0054_user_0016_scene_0004_cfg_0002_84_036422060215.jpg", + "images/task_0054_user_0016_scene_0004_cfg_0002_87_036422060215.jpg", + "images/task_0054_user_0016_scene_0004_cfg_0002_90_036422060215.jpg", + "images/task_0054_user_0016_scene_0004_cfg_0002_93_036422060215.jpg", + "images/task_0054_user_0016_scene_0004_cfg_0002_96_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0054", + "split": "eval", + "split_group_id": "task_0054_user_0016_scene_0004_cfg_0002", + "recording_id": "task_0054_user_0016_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0054_user_0016_scene_0004_cfg_0002_128_036422060215.jpg", + "images/task_0054_user_0016_scene_0004_cfg_0002_131_036422060215.jpg", + "images/task_0054_user_0016_scene_0004_cfg_0002_134_036422060215.jpg", + "images/task_0054_user_0016_scene_0004_cfg_0002_137_036422060215.jpg", + "images/task_0054_user_0016_scene_0004_cfg_0002_140_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0054", + "split": "eval", + "split_group_id": "task_0054_user_0016_scene_0004_cfg_0002", + "recording_id": "task_0054_user_0016_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0054_user_0016_scene_0004_cfg_0002_4_036422060215.jpg", + "images/task_0054_user_0016_scene_0004_cfg_0002_9_036422060215.jpg", + "images/task_0054_user_0016_scene_0004_cfg_0002_14_036422060215.jpg", + "images/task_0054_user_0016_scene_0004_cfg_0002_19_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0054", + "split": "eval", + "split_group_id": "task_0054_user_0016_scene_0004_cfg_0002", + "recording_id": "task_0054_user_0016_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. 
No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0054_user_0016_scene_0004_cfg_0002_16_036422060215.jpg", + "images/task_0054_user_0016_scene_0004_cfg_0002_21_036422060215.jpg", + "images/task_0054_user_0016_scene_0004_cfg_0002_26_036422060215.jpg", + "images/task_0054_user_0016_scene_0004_cfg_0002_31_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0054", + "split": "eval", + "split_group_id": "task_0054_user_0016_scene_0004_cfg_0002", + "recording_id": "task_0054_user_0016_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0054_user_0016_scene_0004_cfg_0002_28_036422060215.jpg", + "images/task_0054_user_0016_scene_0004_cfg_0002_33_036422060215.jpg", + "images/task_0054_user_0016_scene_0004_cfg_0002_38_036422060215.jpg", + "images/task_0054_user_0016_scene_0004_cfg_0002_43_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0054", + "split": "eval", + "split_group_id": "task_0054_user_0016_scene_0004_cfg_0002", + "recording_id": "task_0054_user_0016_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. 
Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0054_user_0016_scene_0004_cfg_0002_34_036422060215.jpg", + "images/task_0054_user_0016_scene_0004_cfg_0002_39_036422060215.jpg", + "images/task_0054_user_0016_scene_0004_cfg_0002_44_036422060215.jpg", + "images/task_0054_user_0016_scene_0004_cfg_0002_49_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0054", + "split": "eval", + "split_group_id": "task_0054_user_0016_scene_0004_cfg_0002", + "recording_id": "task_0054_user_0016_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0054_user_0016_scene_0004_cfg_0002_46_036422060215.jpg", + "images/task_0054_user_0016_scene_0004_cfg_0002_51_036422060215.jpg", + "images/task_0054_user_0016_scene_0004_cfg_0002_56_036422060215.jpg", + "images/task_0054_user_0016_scene_0004_cfg_0002_61_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0054", + "split": "eval", + "split_group_id": "task_0054_user_0016_scene_0004_cfg_0002", + "recording_id": "task_0054_user_0016_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. 
Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0054_user_0016_scene_0004_cfg_0002_58_036422060215.jpg", + "images/task_0054_user_0016_scene_0004_cfg_0002_63_036422060215.jpg", + "images/task_0054_user_0016_scene_0004_cfg_0002_68_036422060215.jpg", + "images/task_0054_user_0016_scene_0004_cfg_0002_73_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0054", + "split": "eval", + "split_group_id": "task_0054_user_0016_scene_0004_cfg_0002", + "recording_id": "task_0054_user_0016_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_0054_user_0016_scene_0004_cfg_0002_93_036422060215.jpg", + "images/task_0054_user_0016_scene_0004_cfg_0002_136_036422060215.jpg", + "images/task_0054_user_0016_scene_0004_cfg_0002_114_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0054", + "split": "eval", + "split_group_id": "task_0054_user_0016_scene_0004_cfg_0002", + "recording_id": "task_0054_user_0016_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0054_user_0016_scene_0006_cfg_0002_71_036422060215.jpg", + "images/task_0054_user_0016_scene_0006_cfg_0002_74_036422060215.jpg", + "images/task_0054_user_0016_scene_0006_cfg_0002_77_036422060215.jpg", + "images/task_0054_user_0016_scene_0006_cfg_0002_80_036422060215.jpg", + "images/task_0054_user_0016_scene_0006_cfg_0002_83_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0054", + "split": "eval", + "split_group_id": "task_0054_user_0016_scene_0006_cfg_0002", + "recording_id": "task_0054_user_0016_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0054_user_0016_scene_0006_cfg_0002_90_036422060215.jpg", + "images/task_0054_user_0016_scene_0006_cfg_0002_93_036422060215.jpg", + "images/task_0054_user_0016_scene_0006_cfg_0002_96_036422060215.jpg", + "images/task_0054_user_0016_scene_0006_cfg_0002_99_036422060215.jpg", + "images/task_0054_user_0016_scene_0006_cfg_0002_102_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0054", + "split": "eval", + "split_group_id": "task_0054_user_0016_scene_0006_cfg_0002", + "recording_id": "task_0054_user_0016_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0054_user_0016_scene_0006_cfg_0002_108_036422060215.jpg", + "images/task_0054_user_0016_scene_0006_cfg_0002_111_036422060215.jpg", + "images/task_0054_user_0016_scene_0006_cfg_0002_114_036422060215.jpg", + "images/task_0054_user_0016_scene_0006_cfg_0002_117_036422060215.jpg", + "images/task_0054_user_0016_scene_0006_cfg_0002_120_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0054", + "split": "eval", + "split_group_id": "task_0054_user_0016_scene_0006_cfg_0002", + "recording_id": "task_0054_user_0016_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0054_user_0016_scene_0006_cfg_0002_198_036422060215.jpg", + "images/task_0054_user_0016_scene_0006_cfg_0002_201_036422060215.jpg", + "images/task_0054_user_0016_scene_0006_cfg_0002_204_036422060215.jpg", + "images/task_0054_user_0016_scene_0006_cfg_0002_207_036422060215.jpg", + "images/task_0054_user_0016_scene_0006_cfg_0002_210_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0054", + "split": "eval", + "split_group_id": "task_0054_user_0016_scene_0006_cfg_0002", + "recording_id": "task_0054_user_0016_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0054_user_0016_scene_0006_cfg_0002_236_036422060215.jpg", + "images/task_0054_user_0016_scene_0006_cfg_0002_239_036422060215.jpg", + "images/task_0054_user_0016_scene_0006_cfg_0002_242_036422060215.jpg", + "images/task_0054_user_0016_scene_0006_cfg_0002_245_036422060215.jpg", + "images/task_0054_user_0016_scene_0006_cfg_0002_248_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0054", + "split": "eval", + "split_group_id": "task_0054_user_0016_scene_0006_cfg_0002", + "recording_id": "task_0054_user_0016_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0054_user_0016_scene_0006_cfg_0002_6_036422060215.jpg", + "images/task_0054_user_0016_scene_0006_cfg_0002_15_036422060215.jpg", + "images/task_0054_user_0016_scene_0006_cfg_0002_24_036422060215.jpg", + "images/task_0054_user_0016_scene_0006_cfg_0002_33_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0054", + "split": "eval", + "split_group_id": "task_0054_user_0016_scene_0006_cfg_0002", + "recording_id": "task_0054_user_0016_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. 
No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0054_user_0016_scene_0006_cfg_0002_24_036422060215.jpg", + "images/task_0054_user_0016_scene_0006_cfg_0002_33_036422060215.jpg", + "images/task_0054_user_0016_scene_0006_cfg_0002_42_036422060215.jpg", + "images/task_0054_user_0016_scene_0006_cfg_0002_51_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0054", + "split": "eval", + "split_group_id": "task_0054_user_0016_scene_0006_cfg_0002", + "recording_id": "task_0054_user_0016_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0054_user_0016_scene_0006_cfg_0002_42_036422060215.jpg", + "images/task_0054_user_0016_scene_0006_cfg_0002_51_036422060215.jpg", + "images/task_0054_user_0016_scene_0006_cfg_0002_60_036422060215.jpg", + "images/task_0054_user_0016_scene_0006_cfg_0002_69_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0054", + "split": "eval", + "split_group_id": "task_0054_user_0016_scene_0006_cfg_0002", + "recording_id": "task_0054_user_0016_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. 
Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0054_user_0016_scene_0006_cfg_0002_51_036422060215.jpg", + "images/task_0054_user_0016_scene_0006_cfg_0002_60_036422060215.jpg", + "images/task_0054_user_0016_scene_0006_cfg_0002_69_036422060215.jpg", + "images/task_0054_user_0016_scene_0006_cfg_0002_78_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0054", + "split": "eval", + "split_group_id": "task_0054_user_0016_scene_0006_cfg_0002", + "recording_id": "task_0054_user_0016_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0054_user_0016_scene_0006_cfg_0002_69_036422060215.jpg", + "images/task_0054_user_0016_scene_0006_cfg_0002_78_036422060215.jpg", + "images/task_0054_user_0016_scene_0006_cfg_0002_87_036422060215.jpg", + "images/task_0054_user_0016_scene_0006_cfg_0002_96_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0054", + "split": "eval", + "split_group_id": "task_0054_user_0016_scene_0006_cfg_0002", + "recording_id": "task_0054_user_0016_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. 
No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0054_user_0016_scene_0006_cfg_0002_87_036422060215.jpg", + "images/task_0054_user_0016_scene_0006_cfg_0002_96_036422060215.jpg", + "images/task_0054_user_0016_scene_0006_cfg_0002_105_036422060215.jpg", + "images/task_0054_user_0016_scene_0006_cfg_0002_114_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0054", + "split": "eval", + "split_group_id": "task_0054_user_0016_scene_0006_cfg_0002", + "recording_id": "task_0054_user_0016_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_0054_user_0016_scene_0006_cfg_0002_244_036422060215.jpg", + "images/task_0054_user_0016_scene_0006_cfg_0002_231_036422060215.jpg", + "images/task_0054_user_0016_scene_0006_cfg_0002_218_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0054", + "split": "eval", + "split_group_id": "task_0054_user_0016_scene_0006_cfg_0002", + "recording_id": "task_0054_user_0016_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_0054_user_0016_scene_0006_cfg_0002_156_036422060215.jpg", + "images/task_0054_user_0016_scene_0006_cfg_0002_166_036422060215.jpg", + "images/task_0054_user_0016_scene_0006_cfg_0002_147_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0054", + "split": "eval", + "split_group_id": "task_0054_user_0016_scene_0006_cfg_0002", + "recording_id": "task_0054_user_0016_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0055_user_0007_scene_0001_cfg_0002_13_036422060215.jpg", + "images/task_0055_user_0007_scene_0001_cfg_0002_32_036422060215.jpg", + "images/task_0055_user_0007_scene_0001_cfg_0002_51_036422060215.jpg", + "images/task_0055_user_0007_scene_0001_cfg_0002_70_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0055", + "split": "eval", + "split_group_id": "task_0055_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0055_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0055_user_0007_scene_0001_cfg_0002_32_036422060215.jpg", + "images/task_0055_user_0007_scene_0001_cfg_0002_51_036422060215.jpg", + "images/task_0055_user_0007_scene_0001_cfg_0002_70_036422060215.jpg", + "images/task_0055_user_0007_scene_0001_cfg_0002_89_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0055", + "split": "eval", + "split_group_id": "task_0055_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0055_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0055_user_0007_scene_0001_cfg_0002_51_036422060215.jpg", + "images/task_0055_user_0007_scene_0001_cfg_0002_70_036422060215.jpg", + "images/task_0055_user_0007_scene_0001_cfg_0002_89_036422060215.jpg", + "images/task_0055_user_0007_scene_0001_cfg_0002_108_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0055", + "split": "eval", + "split_group_id": "task_0055_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0055_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0055_user_0007_scene_0001_cfg_0002_70_036422060215.jpg", + "images/task_0055_user_0007_scene_0001_cfg_0002_89_036422060215.jpg", + "images/task_0055_user_0007_scene_0001_cfg_0002_108_036422060215.jpg", + "images/task_0055_user_0007_scene_0001_cfg_0002_127_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0055", + "split": "eval", + "split_group_id": "task_0055_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0055_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0055_user_0007_scene_0001_cfg_0002_89_036422060215.jpg", + "images/task_0055_user_0007_scene_0001_cfg_0002_108_036422060215.jpg", + "images/task_0055_user_0007_scene_0001_cfg_0002_127_036422060215.jpg", + "images/task_0055_user_0007_scene_0001_cfg_0002_146_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0055", + "split": "eval", + "split_group_id": "task_0055_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0055_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0055_user_0007_scene_0001_cfg_0002_127_036422060215.jpg", + "images/task_0055_user_0007_scene_0001_cfg_0002_146_036422060215.jpg", + "images/task_0055_user_0007_scene_0001_cfg_0002_165_036422060215.jpg", + "images/task_0055_user_0007_scene_0001_cfg_0002_184_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0055", + "split": "eval", + "split_group_id": "task_0055_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0055_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0055_user_0007_scene_0001_cfg_0002_146_036422060215.jpg", + "images/task_0055_user_0007_scene_0001_cfg_0002_165_036422060215.jpg", + "images/task_0055_user_0007_scene_0001_cfg_0002_184_036422060215.jpg", + "images/task_0055_user_0007_scene_0001_cfg_0002_203_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0055", + "split": "eval", + "split_group_id": "task_0055_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0055_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0055_user_0007_scene_0001_cfg_0002_165_036422060215.jpg", + "images/task_0055_user_0007_scene_0001_cfg_0002_184_036422060215.jpg", + "images/task_0055_user_0007_scene_0001_cfg_0002_203_036422060215.jpg", + "images/task_0055_user_0007_scene_0001_cfg_0002_222_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0055", + "split": "eval", + "split_group_id": "task_0055_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0055_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0055_user_0007_scene_0001_cfg_0002_184_036422060215.jpg", + "images/task_0055_user_0007_scene_0001_cfg_0002_203_036422060215.jpg", + "images/task_0055_user_0007_scene_0001_cfg_0002_222_036422060215.jpg", + "images/task_0055_user_0007_scene_0001_cfg_0002_241_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0055", + "split": "eval", + "split_group_id": "task_0055_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0055_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0055_user_0007_scene_0001_cfg_0002_203_036422060215.jpg", + "images/task_0055_user_0007_scene_0001_cfg_0002_222_036422060215.jpg", + "images/task_0055_user_0007_scene_0001_cfg_0002_241_036422060215.jpg", + "images/task_0055_user_0007_scene_0001_cfg_0002_260_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0055", + "split": "eval", + "split_group_id": "task_0055_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0055_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0055_user_0016_scene_0001_cfg_0002_20_036422060215.jpg", + "images/task_0055_user_0016_scene_0001_cfg_0002_50_036422060215.jpg", + "images/task_0055_user_0016_scene_0001_cfg_0002_80_036422060215.jpg", + "images/task_0055_user_0016_scene_0001_cfg_0002_110_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0055", + "split": "eval", + "split_group_id": "task_0055_user_0016_scene_0001_cfg_0002", + "recording_id": "task_0055_user_0016_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0055_user_0016_scene_0001_cfg_0002_50_036422060215.jpg", + "images/task_0055_user_0016_scene_0001_cfg_0002_80_036422060215.jpg", + "images/task_0055_user_0016_scene_0001_cfg_0002_110_036422060215.jpg", + "images/task_0055_user_0016_scene_0001_cfg_0002_140_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0055", + "split": "eval", + "split_group_id": "task_0055_user_0016_scene_0001_cfg_0002", + "recording_id": "task_0055_user_0016_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0055_user_0016_scene_0001_cfg_0002_80_036422060215.jpg", + "images/task_0055_user_0016_scene_0001_cfg_0002_110_036422060215.jpg", + "images/task_0055_user_0016_scene_0001_cfg_0002_140_036422060215.jpg", + "images/task_0055_user_0016_scene_0001_cfg_0002_170_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0055", + "split": "eval", + "split_group_id": "task_0055_user_0016_scene_0001_cfg_0002", + "recording_id": "task_0055_user_0016_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0055_user_0016_scene_0001_cfg_0002_110_036422060215.jpg", + "images/task_0055_user_0016_scene_0001_cfg_0002_140_036422060215.jpg", + "images/task_0055_user_0016_scene_0001_cfg_0002_170_036422060215.jpg", + "images/task_0055_user_0016_scene_0001_cfg_0002_200_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0055", + "split": "eval", + "split_group_id": "task_0055_user_0016_scene_0001_cfg_0002", + "recording_id": "task_0055_user_0016_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0055_user_0016_scene_0001_cfg_0002_140_036422060215.jpg", + "images/task_0055_user_0016_scene_0001_cfg_0002_170_036422060215.jpg", + "images/task_0055_user_0016_scene_0001_cfg_0002_200_036422060215.jpg", + "images/task_0055_user_0016_scene_0001_cfg_0002_230_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0055", + "split": "eval", + "split_group_id": "task_0055_user_0016_scene_0001_cfg_0002", + "recording_id": "task_0055_user_0016_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0055_user_0016_scene_0001_cfg_0002_200_036422060215.jpg", + "images/task_0055_user_0016_scene_0001_cfg_0002_230_036422060215.jpg", + "images/task_0055_user_0016_scene_0001_cfg_0002_260_036422060215.jpg", + "images/task_0055_user_0016_scene_0001_cfg_0002_290_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0055", + "split": "eval", + "split_group_id": "task_0055_user_0016_scene_0001_cfg_0002", + "recording_id": "task_0055_user_0016_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0055_user_0016_scene_0001_cfg_0002_230_036422060215.jpg", + "images/task_0055_user_0016_scene_0001_cfg_0002_260_036422060215.jpg", + "images/task_0055_user_0016_scene_0001_cfg_0002_290_036422060215.jpg", + "images/task_0055_user_0016_scene_0001_cfg_0002_320_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0055", + "split": "eval", + "split_group_id": "task_0055_user_0016_scene_0001_cfg_0002", + "recording_id": "task_0055_user_0016_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — this episode will fail"
      }
    ],
    "images": [
      "images/task_0055_user_0016_scene_0001_cfg_0002_260_036422060215.jpg",
      "images/task_0055_user_0016_scene_0001_cfg_0002_290_036422060215.jpg",
      "images/task_0055_user_0016_scene_0001_cfg_0002_320_036422060215.jpg",
      "images/task_0055_user_0016_scene_0001_cfg_0002_350_036422060215.jpg"
    ],
    "dataset": "RH20T",
    "source_dataset": "RH20T",
    "task_type": "T7",
    "task_id": "task_0055",
    "split": "eval",
    "split_group_id": "task_0055_user_0016_scene_0001_cfg_0002",
    "recording_id": "task_0055_user_0016_scene_0001_cfg_0002",
    "episode_id": null,
    "camera": "036422060215",
    "answer": "B"
  },
  {
    "conversations": [
      {
        "from": "human",
        "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text."
      },
      {
        "from": "gpt",
        "value": "A. No — this episode will fail"
      }
    ],
    "images": [
      "images/task_0055_user_0016_scene_0001_cfg_0002_290_036422060215.jpg",
      "images/task_0055_user_0016_scene_0001_cfg_0002_320_036422060215.jpg",
      "images/task_0055_user_0016_scene_0001_cfg_0002_350_036422060215.jpg",
      "images/task_0055_user_0016_scene_0001_cfg_0002_380_036422060215.jpg"
    ],
    "dataset": "RH20T",
    "source_dataset": "RH20T",
    "task_type": "T7",
    "task_id": "task_0055",
    "split": "eval",
    "split_group_id": "task_0055_user_0016_scene_0001_cfg_0002",
    "recording_id": "task_0055_user_0016_scene_0001_cfg_0002",
    "episode_id": null,
    "camera": "036422060215",
    "answer": "A"
  },
  {
    "conversations": [
      {
        "from": "human",
        "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text."
      },
      {
        "from": "gpt",
        "value": "B. No — this episode will fail"
      }
    ],
    "images": [
      "images/task_0055_user_0016_scene_0001_cfg_0002_320_036422060215.jpg",
      "images/task_0055_user_0016_scene_0001_cfg_0002_350_036422060215.jpg",
      "images/task_0055_user_0016_scene_0001_cfg_0002_380_036422060215.jpg",
      "images/task_0055_user_0016_scene_0001_cfg_0002_410_036422060215.jpg"
    ],
    "dataset": "RH20T",
    "source_dataset": "RH20T",
    "task_type": "T7",
    "task_id": "task_0055",
    "split": "eval",
    "split_group_id": "task_0055_user_0016_scene_0001_cfg_0002",
    "recording_id": "task_0055_user_0016_scene_0001_cfg_0002",
    "episode_id": null,
    "camera": "036422060215",
    "answer": "B"
  },
  {
    "conversations": [
      {
        "from": "human",
        "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text."
      },
      {
        "from": "gpt",
        "value": "B. No — this episode will fail"
      }
    ],
    "images": [
      "images/task_0055_user_0016_scene_0009_cfg_0002_15_036422060215.jpg",
      "images/task_0055_user_0016_scene_0009_cfg_0002_37_036422060215.jpg",
      "images/task_0055_user_0016_scene_0009_cfg_0002_59_036422060215.jpg",
      "images/task_0055_user_0016_scene_0009_cfg_0002_81_036422060215.jpg"
    ],
    "dataset": "RH20T",
    "source_dataset": "RH20T",
    "task_type": "T7",
    "task_id": "task_0055",
    "split": "eval",
    "split_group_id": "task_0055_user_0016_scene_0009_cfg_0002",
    "recording_id": "task_0055_user_0016_scene_0009_cfg_0002",
    "episode_id": null,
    "camera": "036422060215",
    "answer": "B"
  },
  {
    "conversations": [
      {
        "from": "human",
        "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text."
      },
      {
        "from": "gpt",
        "value": "A. No — this episode will fail"
      }
    ],
    "images": [
      "images/task_0055_user_0016_scene_0009_cfg_0002_37_036422060215.jpg",
      "images/task_0055_user_0016_scene_0009_cfg_0002_59_036422060215.jpg",
      "images/task_0055_user_0016_scene_0009_cfg_0002_81_036422060215.jpg",
      "images/task_0055_user_0016_scene_0009_cfg_0002_103_036422060215.jpg"
    ],
    "dataset": "RH20T",
    "source_dataset": "RH20T",
    "task_type": "T7",
    "task_id": "task_0055",
    "split": "eval",
    "split_group_id": "task_0055_user_0016_scene_0009_cfg_0002",
    "recording_id": "task_0055_user_0016_scene_0009_cfg_0002",
    "episode_id": null,
    "camera": "036422060215",
    "answer": "A"
  },
  {
    "conversations": [
      {
        "from": "human",
        "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text."
      },
      {
        "from": "gpt",
        "value": "B. No — this episode will fail"
      }
    ],
    "images": [
      "images/task_0055_user_0016_scene_0009_cfg_0002_59_036422060215.jpg",
      "images/task_0055_user_0016_scene_0009_cfg_0002_81_036422060215.jpg",
      "images/task_0055_user_0016_scene_0009_cfg_0002_103_036422060215.jpg",
      "images/task_0055_user_0016_scene_0009_cfg_0002_125_036422060215.jpg"
    ],
    "dataset": "RH20T",
    "source_dataset": "RH20T",
    "task_type": "T7",
    "task_id": "task_0055",
    "split": "eval",
    "split_group_id": "task_0055_user_0016_scene_0009_cfg_0002",
    "recording_id": "task_0055_user_0016_scene_0009_cfg_0002",
    "episode_id": null,
    "camera": "036422060215",
    "answer": "B"
  },
  {
    "conversations": [
      {
        "from": "human",
        "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text."
      },
      {
        "from": "gpt",
        "value": "B. No — this episode will fail"
      }
    ],
    "images": [
      "images/task_0055_user_0016_scene_0009_cfg_0002_81_036422060215.jpg",
      "images/task_0055_user_0016_scene_0009_cfg_0002_103_036422060215.jpg",
      "images/task_0055_user_0016_scene_0009_cfg_0002_125_036422060215.jpg",
      "images/task_0055_user_0016_scene_0009_cfg_0002_147_036422060215.jpg"
    ],
    "dataset": "RH20T",
    "source_dataset": "RH20T",
    "task_type": "T7",
    "task_id": "task_0055",
    "split": "eval",
    "split_group_id": "task_0055_user_0016_scene_0009_cfg_0002",
    "recording_id": "task_0055_user_0016_scene_0009_cfg_0002",
    "episode_id": null,
    "camera": "036422060215",
    "answer": "B"
  },
  {
    "conversations": [
      {
        "from": "human",
        "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text."
      },
      {
        "from": "gpt",
        "value": "A. No — this episode will fail"
      }
    ],
    "images": [
      "images/task_0055_user_0016_scene_0009_cfg_0002_103_036422060215.jpg",
      "images/task_0055_user_0016_scene_0009_cfg_0002_125_036422060215.jpg",
      "images/task_0055_user_0016_scene_0009_cfg_0002_147_036422060215.jpg",
      "images/task_0055_user_0016_scene_0009_cfg_0002_169_036422060215.jpg"
    ],
    "dataset": "RH20T",
    "source_dataset": "RH20T",
    "task_type": "T7",
    "task_id": "task_0055",
    "split": "eval",
    "split_group_id": "task_0055_user_0016_scene_0009_cfg_0002",
    "recording_id": "task_0055_user_0016_scene_0009_cfg_0002",
    "episode_id": null,
    "camera": "036422060215",
    "answer": "A"
  },
  {
    "conversations": [
      {
        "from": "human",
        "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text."
      },
      {
        "from": "gpt",
        "value": "B. No — this episode will fail"
      }
    ],
    "images": [
      "images/task_0055_user_0016_scene_0009_cfg_0002_147_036422060215.jpg",
      "images/task_0055_user_0016_scene_0009_cfg_0002_169_036422060215.jpg",
      "images/task_0055_user_0016_scene_0009_cfg_0002_191_036422060215.jpg",
      "images/task_0055_user_0016_scene_0009_cfg_0002_213_036422060215.jpg"
    ],
    "dataset": "RH20T",
    "source_dataset": "RH20T",
    "task_type": "T7",
    "task_id": "task_0055",
    "split": "eval",
    "split_group_id": "task_0055_user_0016_scene_0009_cfg_0002",
    "recording_id": "task_0055_user_0016_scene_0009_cfg_0002",
    "episode_id": null,
    "camera": "036422060215",
    "answer": "B"
  },
  {
    "conversations": [
      {
        "from": "human",
        "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text."
      },
      {
        "from": "gpt",
        "value": "A. No — this episode will fail"
      }
    ],
    "images": [
      "images/task_0055_user_0016_scene_0009_cfg_0002_169_036422060215.jpg",
      "images/task_0055_user_0016_scene_0009_cfg_0002_191_036422060215.jpg",
      "images/task_0055_user_0016_scene_0009_cfg_0002_213_036422060215.jpg",
      "images/task_0055_user_0016_scene_0009_cfg_0002_235_036422060215.jpg"
    ],
    "dataset": "RH20T",
    "source_dataset": "RH20T",
    "task_type": "T7",
    "task_id": "task_0055",
    "split": "eval",
    "split_group_id": "task_0055_user_0016_scene_0009_cfg_0002",
    "recording_id": "task_0055_user_0016_scene_0009_cfg_0002",
    "episode_id": null,
    "camera": "036422060215",
    "answer": "A"
  },
  {
    "conversations": [
      {
        "from": "human",
        "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text."
      },
      {
        "from": "gpt",
        "value": "A. No — this episode will fail"
      }
    ],
    "images": [
      "images/task_0055_user_0016_scene_0009_cfg_0002_191_036422060215.jpg",
      "images/task_0055_user_0016_scene_0009_cfg_0002_213_036422060215.jpg",
      "images/task_0055_user_0016_scene_0009_cfg_0002_235_036422060215.jpg",
      "images/task_0055_user_0016_scene_0009_cfg_0002_257_036422060215.jpg"
    ],
    "dataset": "RH20T",
    "source_dataset": "RH20T",
    "task_type": "T7",
    "task_id": "task_0055",
    "split": "eval",
    "split_group_id": "task_0055_user_0016_scene_0009_cfg_0002",
    "recording_id": "task_0055_user_0016_scene_0009_cfg_0002",
    "episode_id": null,
    "camera": "036422060215",
    "answer": "A"
  },
  {
    "conversations": [
      {
        "from": "human",
        "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text."
      },
      {
        "from": "gpt",
        "value": "A. No — this episode will fail"
      }
    ],
    "images": [
      "images/task_0055_user_0016_scene_0009_cfg_0002_213_036422060215.jpg",
      "images/task_0055_user_0016_scene_0009_cfg_0002_235_036422060215.jpg",
      "images/task_0055_user_0016_scene_0009_cfg_0002_257_036422060215.jpg",
      "images/task_0055_user_0016_scene_0009_cfg_0002_279_036422060215.jpg"
    ],
    "dataset": "RH20T",
    "source_dataset": "RH20T",
    "task_type": "T7",
    "task_id": "task_0055",
    "split": "eval",
    "split_group_id": "task_0055_user_0016_scene_0009_cfg_0002",
    "recording_id": "task_0055_user_0016_scene_0009_cfg_0002",
    "episode_id": null,
    "camera": "036422060215",
    "answer": "A"
  },
  {
    "conversations": [
      {
        "from": "human",
        "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text."
      },
      {
        "from": "gpt",
        "value": "B. No — this episode will fail"
      }
    ],
    "images": [
      "images/task_0055_user_0016_scene_0009_cfg_0002_235_036422060215.jpg",
      "images/task_0055_user_0016_scene_0009_cfg_0002_257_036422060215.jpg",
      "images/task_0055_user_0016_scene_0009_cfg_0002_279_036422060215.jpg",
      "images/task_0055_user_0016_scene_0009_cfg_0002_301_036422060215.jpg"
    ],
    "dataset": "RH20T",
    "source_dataset": "RH20T",
    "task_type": "T7",
    "task_id": "task_0055",
    "split": "eval",
    "split_group_id": "task_0055_user_0016_scene_0009_cfg_0002",
    "recording_id": "task_0055_user_0016_scene_0009_cfg_0002",
    "episode_id": null,
    "camera": "036422060215",
    "answer": "B"
  },
  {
    "conversations": [
      {
        "from": "human",
        "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text."
      },
      {
        "from": "gpt",
        "value": "A. Stationary (the arm is still or barely moving)"
      }
    ],
    "images": [
      "images/task_0056_user_0007_scene_0010_cfg_0002_62_036422060215.jpg",
      "images/task_0056_user_0007_scene_0010_cfg_0002_65_036422060215.jpg",
      "images/task_0056_user_0007_scene_0010_cfg_0002_68_036422060215.jpg",
      "images/task_0056_user_0007_scene_0010_cfg_0002_71_036422060215.jpg",
      "images/task_0056_user_0007_scene_0010_cfg_0002_74_036422060215.jpg"
    ],
    "dataset": "RH20T",
    "source_dataset": "RH20T",
    "task_type": "T6",
    "task_id": "task_0056",
    "split": "eval",
    "split_group_id": "task_0056_user_0007_scene_0010_cfg_0002",
    "recording_id": "task_0056_user_0007_scene_0010_cfg_0002",
    "episode_id": null,
    "camera": "036422060215",
    "answer": "A"
  },
  {
    "conversations": [
      {
        "from": "human",
        "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text."
      },
      {
        "from": "gpt",
        "value": "A. Stationary (the arm is still or barely moving)"
      }
    ],
    "images": [
      "images/task_0056_user_0007_scene_0010_cfg_0002_80_036422060215.jpg",
      "images/task_0056_user_0007_scene_0010_cfg_0002_83_036422060215.jpg",
      "images/task_0056_user_0007_scene_0010_cfg_0002_86_036422060215.jpg",
      "images/task_0056_user_0007_scene_0010_cfg_0002_89_036422060215.jpg",
      "images/task_0056_user_0007_scene_0010_cfg_0002_92_036422060215.jpg"
    ],
    "dataset": "RH20T",
    "source_dataset": "RH20T",
    "task_type": "T6",
    "task_id": "task_0056",
    "split": "eval",
    "split_group_id": "task_0056_user_0007_scene_0010_cfg_0002",
    "recording_id": "task_0056_user_0007_scene_0010_cfg_0002",
    "episode_id": null,
    "camera": "036422060215",
    "answer": "A"
  },
  {
    "conversations": [
      {
        "from": "human",
        "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text."
      },
      {
        "from": "gpt",
        "value": "B. Actively moving (the arm is clearly in motion)"
      }
    ],
    "images": [
      "images/task_0056_user_0007_scene_0010_cfg_0002_123_036422060215.jpg",
      "images/task_0056_user_0007_scene_0010_cfg_0002_126_036422060215.jpg",
      "images/task_0056_user_0007_scene_0010_cfg_0002_129_036422060215.jpg",
      "images/task_0056_user_0007_scene_0010_cfg_0002_132_036422060215.jpg",
      "images/task_0056_user_0007_scene_0010_cfg_0002_135_036422060215.jpg"
    ],
    "dataset": "RH20T",
    "source_dataset": "RH20T",
    "task_type": "T6",
    "task_id": "task_0056",
    "split": "eval",
    "split_group_id": "task_0056_user_0007_scene_0010_cfg_0002",
    "recording_id": "task_0056_user_0007_scene_0010_cfg_0002",
    "episode_id": null,
    "camera": "036422060215",
    "answer": "B"
  },
  {
    "conversations": [
      {
        "from": "human",
        "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text."
      },
      {
        "from": "gpt",
        "value": "A. Actively moving (the arm is clearly in motion)"
      }
    ],
    "images": [
      "images/task_0056_user_0007_scene_0010_cfg_0002_141_036422060215.jpg",
      "images/task_0056_user_0007_scene_0010_cfg_0002_144_036422060215.jpg",
      "images/task_0056_user_0007_scene_0010_cfg_0002_147_036422060215.jpg",
      "images/task_0056_user_0007_scene_0010_cfg_0002_150_036422060215.jpg",
      "images/task_0056_user_0007_scene_0010_cfg_0002_153_036422060215.jpg"
    ],
    "dataset": "RH20T",
    "source_dataset": "RH20T",
    "task_type": "T6",
    "task_id": "task_0056",
    "split": "eval",
    "split_group_id": "task_0056_user_0007_scene_0010_cfg_0002",
    "recording_id": "task_0056_user_0007_scene_0010_cfg_0002",
    "episode_id": null,
    "camera": "036422060215",
    "answer": "A"
  },
  {
    "conversations": [
      {
        "from": "human",
        "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text."
      },
      {
        "from": "gpt",
        "value": "B. Yes — this episode will succeed"
      }
    ],
    "images": [
      "images/task_0056_user_0007_scene_0010_cfg_0002_4_036422060215.jpg",
      "images/task_0056_user_0007_scene_0010_cfg_0002_10_036422060215.jpg",
      "images/task_0056_user_0007_scene_0010_cfg_0002_16_036422060215.jpg",
      "images/task_0056_user_0007_scene_0010_cfg_0002_22_036422060215.jpg"
    ],
    "dataset": "RH20T",
    "source_dataset": "RH20T",
    "task_type": "T7",
    "task_id": "task_0056",
    "split": "eval",
    "split_group_id": "task_0056_user_0007_scene_0010_cfg_0002",
    "recording_id": "task_0056_user_0007_scene_0010_cfg_0002",
    "episode_id": null,
    "camera": "036422060215",
    "answer": "B"
  },
  {
    "conversations": [
      {
        "from": "human",
        "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text."
      },
      {
        "from": "gpt",
        "value": "A. Yes — this episode will succeed"
      }
    ],
    "images": [
      "images/task_0056_user_0007_scene_0010_cfg_0002_16_036422060215.jpg",
      "images/task_0056_user_0007_scene_0010_cfg_0002_22_036422060215.jpg",
      "images/task_0056_user_0007_scene_0010_cfg_0002_28_036422060215.jpg",
      "images/task_0056_user_0007_scene_0010_cfg_0002_34_036422060215.jpg"
    ],
    "dataset": "RH20T",
    "source_dataset": "RH20T",
    "task_type": "T7",
    "task_id": "task_0056",
    "split": "eval",
    "split_group_id": "task_0056_user_0007_scene_0010_cfg_0002",
    "recording_id": "task_0056_user_0007_scene_0010_cfg_0002",
    "episode_id": null,
    "camera": "036422060215",
    "answer": "A"
  },
  {
    "conversations": [
      {
        "from": "human",
        "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text."
      },
      {
        "from": "gpt",
        "value": "A. Yes — this episode will succeed"
      }
    ],
    "images": [
      "images/task_0056_user_0007_scene_0010_cfg_0002_28_036422060215.jpg",
      "images/task_0056_user_0007_scene_0010_cfg_0002_34_036422060215.jpg",
      "images/task_0056_user_0007_scene_0010_cfg_0002_40_036422060215.jpg",
      "images/task_0056_user_0007_scene_0010_cfg_0002_46_036422060215.jpg"
    ],
    "dataset": "RH20T",
    "source_dataset": "RH20T",
    "task_type": "T7",
    "task_id": "task_0056",
    "split": "eval",
    "split_group_id": "task_0056_user_0007_scene_0010_cfg_0002",
    "recording_id": "task_0056_user_0007_scene_0010_cfg_0002",
    "episode_id": null,
    "camera": "036422060215",
    "answer": "A"
  },
  {
    "conversations": [
      {
        "from": "human",
        "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text."
      },
      {
        "from": "gpt",
        "value": "B. Yes — this episode will succeed"
      }
    ],
    "images": [
      "images/task_0056_user_0007_scene_0010_cfg_0002_34_036422060215.jpg",
      "images/task_0056_user_0007_scene_0010_cfg_0002_40_036422060215.jpg",
      "images/task_0056_user_0007_scene_0010_cfg_0002_46_036422060215.jpg",
      "images/task_0056_user_0007_scene_0010_cfg_0002_52_036422060215.jpg"
    ],
    "dataset": "RH20T",
    "source_dataset": "RH20T",
    "task_type": "T7",
    "task_id": "task_0056",
    "split": "eval",
    "split_group_id": "task_0056_user_0007_scene_0010_cfg_0002",
    "recording_id": "task_0056_user_0007_scene_0010_cfg_0002",
    "episode_id": null,
    "camera": "036422060215",
    "answer": "B"
  },
  {
    "conversations": [
      {
        "from": "human",
        "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text."
      },
      {
        "from": "gpt",
        "value": "B. Yes — this episode will succeed"
      }
    ],
    "images": [
      "images/task_0056_user_0007_scene_0010_cfg_0002_46_036422060215.jpg",
      "images/task_0056_user_0007_scene_0010_cfg_0002_52_036422060215.jpg",
      "images/task_0056_user_0007_scene_0010_cfg_0002_58_036422060215.jpg",
      "images/task_0056_user_0007_scene_0010_cfg_0002_64_036422060215.jpg"
    ],
    "dataset": "RH20T",
    "source_dataset": "RH20T",
    "task_type": "T7",
    "task_id": "task_0056",
    "split": "eval",
    "split_group_id": "task_0056_user_0007_scene_0010_cfg_0002",
    "recording_id": "task_0056_user_0007_scene_0010_cfg_0002",
    "episode_id": null,
    "camera": "036422060215",
    "answer": "B"
  },
  {
    "conversations": [
      {
        "from": "human",
        "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text."
      },
      {
        "from": "gpt",
        "value": "B. Yes — this episode will succeed"
      }
    ],
    "images": [
      "images/task_0056_user_0007_scene_0010_cfg_0002_58_036422060215.jpg",
      "images/task_0056_user_0007_scene_0010_cfg_0002_64_036422060215.jpg",
      "images/task_0056_user_0007_scene_0010_cfg_0002_70_036422060215.jpg",
      "images/task_0056_user_0007_scene_0010_cfg_0002_76_036422060215.jpg"
    ],
    "dataset": "RH20T",
    "source_dataset": "RH20T",
    "task_type": "T7",
    "task_id": "task_0056",
    "split": "eval",
    "split_group_id": "task_0056_user_0007_scene_0010_cfg_0002",
    "recording_id": "task_0056_user_0007_scene_0010_cfg_0002",
    "episode_id": null,
    "camera": "036422060215",
    "answer": "B"
  },
  {
    "conversations": [
      {
        "from": "human",
        "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. release\nC. pre-approach\nD. approach\nAnswer with the option label and text."
      },
      {
        "from": "gpt",
        "value": "C. pre-approach"
      }
    ],
    "images": [
      "images/task_0057_user_0016_scene_0002_cfg_0002_12_036422060215.jpg"
    ],
    "dataset": "RH20T",
    "source_dataset": "RH20T",
    "task_type": "T1",
    "task_id": "task_0057",
    "split": "eval",
    "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002",
    "recording_id": "task_0057_user_0016_scene_0002_cfg_0002",
    "episode_id": null,
    "camera": "036422060215",
    "answer": "C"
  },
  {
    "conversations": [
      {
        "from": "human",
        "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. approach\nC. hold and carry\nD. pre-approach\nAnswer with the option label and text."
      },
      {
        "from": "gpt",
        "value": "D. pre-approach"
      }
    ],
    "images": [
      "images/task_0057_user_0016_scene_0002_cfg_0002_30_036422060215.jpg"
    ],
    "dataset": "RH20T",
    "source_dataset": "RH20T",
    "task_type": "T1",
    "task_id": "task_0057",
    "split": "eval",
    "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002",
    "recording_id": "task_0057_user_0016_scene_0002_cfg_0002",
    "episode_id": null,
    "camera": "036422060215",
    "answer": "D"
  },
  {
    "conversations": [
      {
        "from": "human",
        "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. release\nC. pre-approach\nD. hold and carry\nAnswer with the option label and text."
      },
      {
        "from": "gpt",
        "value": "A. approach"
      }
    ],
    "images": [
      "images/task_0057_user_0016_scene_0002_cfg_0002_58_036422060215.jpg"
    ],
    "dataset": "RH20T",
    "source_dataset": "RH20T",
    "task_type": "T1",
    "task_id": "task_0057",
    "split": "eval",
    "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002",
    "recording_id": "task_0057_user_0016_scene_0002_cfg_0002",
    "episode_id": null,
    "camera": "036422060215",
    "answer": "A"
  },
  {
    "conversations": [
      {
        "from": "human",
        "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. transfer\nC. pre-approach\nD. hold and carry\nAnswer with the option label and text."
      },
      {
        "from": "gpt",
        "value": "B. transfer"
      }
    ],
    "images": [
      "images/task_0057_user_0016_scene_0002_cfg_0002_59_036422060215.jpg"
    ],
    "dataset": "RH20T",
    "source_dataset": "RH20T",
    "task_type": "T1",
    "task_id": "task_0057",
    "split": "eval",
    "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002",
    "recording_id": "task_0057_user_0016_scene_0002_cfg_0002",
    "episode_id": null,
    "camera": "036422060215",
    "answer": "B"
  },
  {
    "conversations": [
      {
        "from": "human",
        "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. hold and carry\nC. transfer\nD. release\nAnswer with the option label and text."
      },
      {
        "from": "gpt",
        "value": "D. release"
      }
    ],
    "images": [
      "images/task_0057_user_0016_scene_0002_cfg_0002_73_036422060215.jpg"
    ],
    "dataset": "RH20T",
    "source_dataset": "RH20T",
    "task_type": "T1",
    "task_id": "task_0057",
    "split": "eval",
    "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002",
    "recording_id": "task_0057_user_0016_scene_0002_cfg_0002",
    "episode_id": null,
    "camera": "036422060215",
    "answer": "D"
  },
  {
    "conversations": [
      {
        "from": "human",
        "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. pre-approach\nC. approach\nD. contact\nAnswer with the option label and text."
      },
      {
        "from": "gpt",
        "value": "D. contact"
      }
    ],
    "images": [
      "images/task_0057_user_0016_scene_0002_cfg_0002_189_036422060215.jpg"
    ],
    "dataset": "RH20T",
    "source_dataset": "RH20T",
    "task_type": "T1",
    "task_id": "task_0057",
    "split": "eval",
    "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002",
    "recording_id": "task_0057_user_0016_scene_0002_cfg_0002",
    "episode_id": null,
    "camera": "036422060215",
    "answer": "D"
  },
  {
    "conversations": [
      {
        "from": "human",
        "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. hold and carry\nC. contact\nD. approach\nAnswer with the option label and text."
      },
      {
        "from": "gpt",
        "value": "B. hold and carry"
      }
    ],
    "images": [
      "images/task_0057_user_0016_scene_0002_cfg_0002_197_036422060215.jpg"
    ],
    "dataset": "RH20T",
    "source_dataset": "RH20T",
    "task_type": "T1",
    "task_id": "task_0057",
    "split": "eval",
    "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002",
    "recording_id": "task_0057_user_0016_scene_0002_cfg_0002",
    "episode_id": null,
    "camera": "036422060215",
    "answer": "B"
  },
  {
    "conversations": [
      {
        "from": "human",
        "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. transfer\nC. release\nD. approach\nAnswer with the option label and text."
      },
      {
        "from": "gpt",
        "value": "B. transfer"
      }
    ],
    "images": [
      "images/task_0057_user_0016_scene_0002_cfg_0002_394_036422060215.jpg"
    ],
    "dataset": "RH20T",
    "source_dataset": "RH20T",
    "task_type": "T1",
    "task_id": "task_0057",
    "split": "eval",
    "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002",
    "recording_id": "task_0057_user_0016_scene_0002_cfg_0002",
    "episode_id": null,
    "camera": "036422060215",
    "answer": "B"
  },
  {
    "conversations": [
      {
        "from": "human",
        "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. pre-approach\nC. release\nD. approach\nAnswer with the option label and text."
      },
      {
        "from": "gpt",
        "value": "A. hold and carry"
      }
    ],
    "images": [
      "images/task_0057_user_0016_scene_0002_cfg_0002_452_036422060215.jpg"
    ],
    "dataset": "RH20T",
    "source_dataset": "RH20T",
    "task_type": "T1",
    "task_id": "task_0057",
    "split": "eval",
    "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002",
    "recording_id": "task_0057_user_0016_scene_0002_cfg_0002",
    "episode_id": null,
    "camera": "036422060215",
    "answer": "A"
  },
  {
    "conversations": [
      {
        "from": "human",
        "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. approach\nC. release\nD. contact\nAnswer with the option label and text."
      },
      {
        "from": "gpt",
        "value": "C. release"
      }
    ],
    "images": [
      "images/task_0057_user_0016_scene_0002_cfg_0002_470_036422060215.jpg"
    ],
    "dataset": "RH20T",
    "source_dataset": "RH20T",
    "task_type": "T1",
    "task_id": "task_0057",
    "split": "eval",
    "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002",
    "recording_id": "task_0057_user_0016_scene_0002_cfg_0002",
    "episode_id": null,
    "camera": "036422060215",
    "answer": "C"
  },
  {
    "conversations": [
      {
        "from": "human",
        "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. approach\nC. contact\nD. release\nAnswer with the option label and text."
      },
      {
        "from": "gpt",
        "value": "C. contact"
      }
    ],
    "images": [
      "images/task_0057_user_0016_scene_0002_cfg_0002_507_036422060215.jpg"
    ],
    "dataset": "RH20T",
    "source_dataset": "RH20T",
    "task_type": "T1",
    "task_id": "task_0057",
    "split": "eval",
    "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002",
    "recording_id": "task_0057_user_0016_scene_0002_cfg_0002",
    "episode_id": null,
    "camera": "036422060215",
    "answer": "C"
  },
  {
    "conversations": [
      {
        "from": "human",
        "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. transfer\nC. release\nD. contact\nAnswer with the option label and text."
      },
      {
        "from": "gpt",
        "value": "A. approach"
      }
    ],
    "images": [
      "images/task_0057_user_0016_scene_0002_cfg_0002_660_036422060215.jpg"
    ],
    "dataset": "RH20T",
    "source_dataset": "RH20T",
    "task_type": "T1",
    "task_id": "task_0057",
    "split": "eval",
    "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002",
    "recording_id": "task_0057_user_0016_scene_0002_cfg_0002",
    "episode_id": null,
    "camera": "036422060215",
    "answer": "A"
  },
  {
    "conversations": [
      {
        "from": "human",
        "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. release\nC. pre-approach\nD. contact\nAnswer with the option label and text."
      },
      {
        "from": "gpt",
        "value": "D. contact"
      }
    ],
    "images": [
      "images/task_0057_user_0016_scene_0002_cfg_0002_1050_036422060215.jpg"
    ],
    "dataset": "RH20T",
    "source_dataset": "RH20T",
    "task_type": "T1",
    "task_id": "task_0057",
    "split": "eval",
    "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002",
    "recording_id": "task_0057_user_0016_scene_0002_cfg_0002",
    "episode_id": null,
    "camera": "036422060215",
    "answer": "D"
  },
  {
    "conversations": [
      {
        "from": "human",
        "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. approach\nC. release\nD. contact\nAnswer with the option label and text."
      },
      {
        "from": "gpt",
        "value": "B. approach"
      }
    ],
    "images": [
      "images/task_0057_user_0016_scene_0002_cfg_0002_1063_036422060215.jpg"
    ],
    "dataset": "RH20T",
    "source_dataset": "RH20T",
    "task_type": "T1",
    "task_id": "task_0057",
    "split": "eval",
    "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002",
    "recording_id": "task_0057_user_0016_scene_0002_cfg_0002",
    "episode_id": null,
    "camera": "036422060215",
    "answer": "B"
  },
  {
    "conversations": [
      {
        "from": "human",
        "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. contact\nC. transfer\nD. hold and carry\nAnswer with the option label and text."
      },
      {
        "from": "gpt",
        "value": "D. hold and carry"
      }
    ],
    "images": [
      "images/task_0057_user_0016_scene_0002_cfg_0002_1080_036422060215.jpg"
    ],
    "dataset": "RH20T",
    "source_dataset": "RH20T",
    "task_type": "T1",
    "task_id": "task_0057",
    "split": "eval",
    "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002",
    "recording_id": "task_0057_user_0016_scene_0002_cfg_0002",
    "episode_id": null,
    "camera": "036422060215",
    "answer": "D"
  },
  {
    "conversations": [
      {
        "from": "human",
        "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. transfer\nC. pre-approach\nD. release\nAnswer with the option label and text."
      },
      {
        "from": "gpt",
        "value": "B. transfer"
      }
    ],
    "images": [
      "images/task_0057_user_0016_scene_0002_cfg_0002_1096_036422060215.jpg"
    ],
    "dataset": "RH20T",
    "source_dataset": "RH20T",
    "task_type": "T1",
    "task_id": "task_0057",
    "split": "eval",
    "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002",
    "recording_id": "task_0057_user_0016_scene_0002_cfg_0002",
    "episode_id": null,
    "camera": "036422060215",
    "answer": "B"
  },
  {
    "conversations": [
      {
        "from": "human",
        "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. transfer\nC. pre-approach\nD. release\nAnswer with the option label and text."
      },
      {
        "from": "gpt",
        "value": "D. release"
      }
    ],
    "images": [
      "images/task_0057_user_0016_scene_0002_cfg_0002_1105_036422060215.jpg"
    ],
    "dataset": "RH20T",
    "source_dataset": "RH20T",
    "task_type": "T1",
    "task_id": "task_0057",
    "split": "eval",
    "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002",
    "recording_id": "task_0057_user_0016_scene_0002_cfg_0002",
    "episode_id": null,
    "camera": "036422060215",
    "answer": "D"
  },
  {
    "conversations": [
      {
        "from": "human",
        "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text."
      },
      {
        "from": "gpt",
        "value": "A. No — no contact"
      }
    ],
    "images": [
      "images/task_0057_user_0016_scene_0002_cfg_0002_12_036422060215.jpg"
    ],
    "dataset": "RH20T",
    "source_dataset": "RH20T",
    "task_type": "T2",
    "task_id": "task_0057",
    "split": "eval",
    "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002",
    "recording_id": "task_0057_user_0016_scene_0002_cfg_0002",
    "episode_id": null,
    "camera": "036422060215",
    "answer": "A"
  },
  {
    "conversations": [
      {
        "from": "human",
        "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text."
      },
      {
        "from": "gpt",
        "value": "B. No — no contact"
      }
    ],
    "images": [
      "images/task_0057_user_0016_scene_0002_cfg_0002_30_036422060215.jpg"
    ],
    "dataset": "RH20T",
    "source_dataset": "RH20T",
    "task_type": "T2",
    "task_id": "task_0057",
    "split": "eval",
    "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002",
    "recording_id": "task_0057_user_0016_scene_0002_cfg_0002",
    "episode_id": null,
    "camera": "036422060215",
    "answer": "B"
  },
  {
    "conversations": [
      {
        "from": "human",
        "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text."
      },
      {
        "from": "gpt",
        "value": "B. No — no contact"
      }
    ],
    "images": [
      "images/task_0057_user_0016_scene_0002_cfg_0002_58_036422060215.jpg"
    ],
    "dataset": "RH20T",
    "source_dataset": "RH20T",
    "task_type": "T2",
    "task_id": "task_0057",
    "split": "eval",
    "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002",
    "recording_id": "task_0057_user_0016_scene_0002_cfg_0002",
    "episode_id": null,
    "camera": "036422060215",
    "answer": "B"
  },
  {
    "conversations": [
      {
        "from": "human",
        "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text."
      },
      {
        "from": "gpt",
        "value": "A. Yes — contact established"
      }
    ],
    "images": [
      "images/task_0057_user_0016_scene_0002_cfg_0002_189_036422060215.jpg"
    ],
    "dataset": "RH20T",
    "source_dataset": "RH20T",
    "task_type": "T2",
    "task_id": "task_0057",
    "split": "eval",
    "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002",
    "recording_id": "task_0057_user_0016_scene_0002_cfg_0002",
    "episode_id": null,
    "camera": "036422060215",
    "answer": "A"
  },
  {
    "conversations": [
      {
        "from": "human",
        "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text."
      },
      {
        "from": "gpt",
        "value": "B. Yes — contact established"
      }
    ],
    "images": [
      "images/task_0057_user_0016_scene_0002_cfg_0002_452_036422060215.jpg"
    ],
    "dataset": "RH20T",
    "source_dataset": "RH20T",
    "task_type": "T2",
    "task_id": "task_0057",
    "split": "eval",
    "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002",
    "recording_id": "task_0057_user_0016_scene_0002_cfg_0002",
    "episode_id": null,
    "camera": "036422060215",
    "answer": "B"
  },
  {
    "conversations": [
      {
        "from": "human",
        "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text."
      },
      {
        "from": "gpt",
        "value": "B. 
Yes — contact established" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_507_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes ��� contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_660_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — contact established" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_1050_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_1063_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — contact established" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_1080_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Left\nC. Down\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Up" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_30_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_40_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_50_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_60_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Left\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Right" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_50_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_60_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_70_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_80_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Down\nC. Left\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Up" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_90_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_100_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_110_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_120_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Right\nC. Left\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Up" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_100_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_110_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_120_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_130_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Up\nC. Down\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Left" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_310_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_320_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_330_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_340_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Down\nC. Left\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Down" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_330_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_340_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_350_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_360_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Up\nC. Down\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Right" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_350_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_360_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_370_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_380_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Down\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Right" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_360_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_370_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_380_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_390_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Left\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Left" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_1090_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_1100_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_1110_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_1120_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Left\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Down" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_1170_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_1180_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_1190_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_1200_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Right\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Down" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_1180_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_1190_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_1200_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_1210_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_6_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_9_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_12_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_15_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_18_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_24_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_27_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_30_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_33_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_36_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_388_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_391_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_394_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_397_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_400_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_1074_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_1077_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_1080_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_1083_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_1086_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_24_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_54_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_84_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_114_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. 
No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_98_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_128_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_158_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_188_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_172_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_202_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_232_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_262_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. 
No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_246_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_276_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_306_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_336_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_320_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_350_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_380_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_410_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. 
No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_394_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_424_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_454_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_484_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_468_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_452_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_943_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_782_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_76_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_65_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_389_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_452_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_124_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_140_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_671_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_690_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_551_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_523_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_223_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_201_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_350_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_389_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_468_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_523_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_223_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_350_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_957_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_943_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_124_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_76_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_671_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_551_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_957_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_1068_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_690_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_747_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_201_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_140_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_747_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_782_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_1102_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_1068_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_60_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_63_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_66_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_69_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_72_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_64_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_67_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_70_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_73_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_76_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_69_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_72_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_75_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_78_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_81_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_119_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_122_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_125_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_128_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_131_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_126_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_129_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_132_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_135_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_138_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_133_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_136_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_139_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_142_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_145_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_198_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_201_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_204_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_207_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_210_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_206_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_209_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_212_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_215_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_218_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_214_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_217_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_220_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_223_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_226_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_348_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_351_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_354_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_357_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_360_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_364_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_367_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_370_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_373_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_376_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_379_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_382_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_385_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_388_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_391_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_448_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_451_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_454_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_457_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_460_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_454_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_457_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_460_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_463_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_466_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_460_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_463_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_466_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_469_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_472_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_520_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_523_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_526_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_529_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_532_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_531_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_534_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_537_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_540_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_543_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_542_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_545_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_548_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_551_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_554_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_667_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_670_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_673_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_676_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_679_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_674_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_677_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_680_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_683_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_686_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_682_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_685_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_688_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_691_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_694_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_745_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_748_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_751_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_754_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_757_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_758_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_761_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_764_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_767_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_770_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_772_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_775_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_778_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_781_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_784_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_938_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_941_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_944_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_947_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_950_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_944_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_947_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_950_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_953_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_956_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_950_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_953_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_956_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_959_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_962_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_1066_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_1069_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_1072_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_1075_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_1078_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_1079_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_1082_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_1085_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_1088_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_1091_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_1092_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_1095_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_1098_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_1101_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_1104_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_390_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_370_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_349_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_537_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_522_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_552_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_452_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_468_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_460_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_783_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_746_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_764_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_132_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_123_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_141_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_691_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_680_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_670_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_212_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_224_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_200_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_0057_user_0016_scene_0002_cfg_0002_1085_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_1103_036422060215.jpg", + "images/task_0057_user_0016_scene_0002_cfg_0002_1067_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0057", + "split": "eval", + "split_group_id": "task_0057_user_0016_scene_0002_cfg_0002", + "recording_id": "task_0057_user_0016_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0058_user_0007_scene_0002_cfg_0002_29_036422060215.jpg", + "images/task_0058_user_0007_scene_0002_cfg_0002_32_036422060215.jpg", + "images/task_0058_user_0007_scene_0002_cfg_0002_35_036422060215.jpg", + "images/task_0058_user_0007_scene_0002_cfg_0002_38_036422060215.jpg", + "images/task_0058_user_0007_scene_0002_cfg_0002_41_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0058", + "split": "eval", + "split_group_id": "task_0058_user_0007_scene_0002_cfg_0002", + "recording_id": "task_0058_user_0007_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. 
Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0058_user_0007_scene_0002_cfg_0002_65_036422060215.jpg", + "images/task_0058_user_0007_scene_0002_cfg_0002_68_036422060215.jpg", + "images/task_0058_user_0007_scene_0002_cfg_0002_71_036422060215.jpg", + "images/task_0058_user_0007_scene_0002_cfg_0002_74_036422060215.jpg", + "images/task_0058_user_0007_scene_0002_cfg_0002_77_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0058", + "split": "eval", + "split_group_id": "task_0058_user_0007_scene_0002_cfg_0002", + "recording_id": "task_0058_user_0007_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0058_user_0007_scene_0002_cfg_0002_89_036422060215.jpg", + "images/task_0058_user_0007_scene_0002_cfg_0002_92_036422060215.jpg", + "images/task_0058_user_0007_scene_0002_cfg_0002_95_036422060215.jpg", + "images/task_0058_user_0007_scene_0002_cfg_0002_98_036422060215.jpg", + "images/task_0058_user_0007_scene_0002_cfg_0002_101_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0058", + "split": "eval", + "split_group_id": "task_0058_user_0007_scene_0002_cfg_0002", + "recording_id": "task_0058_user_0007_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0058_user_0007_scene_0002_cfg_0002_388_036422060215.jpg", + "images/task_0058_user_0007_scene_0002_cfg_0002_391_036422060215.jpg", + "images/task_0058_user_0007_scene_0002_cfg_0002_394_036422060215.jpg", + "images/task_0058_user_0007_scene_0002_cfg_0002_397_036422060215.jpg", + "images/task_0058_user_0007_scene_0002_cfg_0002_400_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0058", + "split": "eval", + "split_group_id": "task_0058_user_0007_scene_0002_cfg_0002", + "recording_id": "task_0058_user_0007_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0058_user_0007_scene_0002_cfg_0002_413_036422060215.jpg", + "images/task_0058_user_0007_scene_0002_cfg_0002_416_036422060215.jpg", + "images/task_0058_user_0007_scene_0002_cfg_0002_419_036422060215.jpg", + "images/task_0058_user_0007_scene_0002_cfg_0002_422_036422060215.jpg", + "images/task_0058_user_0007_scene_0002_cfg_0002_425_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0058", + "split": "eval", + "split_group_id": "task_0058_user_0007_scene_0002_cfg_0002", + "recording_id": "task_0058_user_0007_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0058_user_0007_scene_0002_cfg_0002_9_036422060215.jpg", + "images/task_0058_user_0007_scene_0002_cfg_0002_23_036422060215.jpg", + "images/task_0058_user_0007_scene_0002_cfg_0002_37_036422060215.jpg", + "images/task_0058_user_0007_scene_0002_cfg_0002_51_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0058", + "split": "eval", + "split_group_id": "task_0058_user_0007_scene_0002_cfg_0002", + "recording_id": "task_0058_user_0007_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. 
No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0058_user_0007_scene_0002_cfg_0002_37_036422060215.jpg", + "images/task_0058_user_0007_scene_0002_cfg_0002_51_036422060215.jpg", + "images/task_0058_user_0007_scene_0002_cfg_0002_65_036422060215.jpg", + "images/task_0058_user_0007_scene_0002_cfg_0002_79_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0058", + "split": "eval", + "split_group_id": "task_0058_user_0007_scene_0002_cfg_0002", + "recording_id": "task_0058_user_0007_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0058_user_0007_scene_0002_cfg_0002_65_036422060215.jpg", + "images/task_0058_user_0007_scene_0002_cfg_0002_79_036422060215.jpg", + "images/task_0058_user_0007_scene_0002_cfg_0002_93_036422060215.jpg", + "images/task_0058_user_0007_scene_0002_cfg_0002_107_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0058", + "split": "eval", + "split_group_id": "task_0058_user_0007_scene_0002_cfg_0002", + "recording_id": "task_0058_user_0007_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. 
No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0058_user_0007_scene_0002_cfg_0002_93_036422060215.jpg", + "images/task_0058_user_0007_scene_0002_cfg_0002_107_036422060215.jpg", + "images/task_0058_user_0007_scene_0002_cfg_0002_121_036422060215.jpg", + "images/task_0058_user_0007_scene_0002_cfg_0002_135_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0058", + "split": "eval", + "split_group_id": "task_0058_user_0007_scene_0002_cfg_0002", + "recording_id": "task_0058_user_0007_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0058_user_0007_scene_0002_cfg_0002_121_036422060215.jpg", + "images/task_0058_user_0007_scene_0002_cfg_0002_135_036422060215.jpg", + "images/task_0058_user_0007_scene_0002_cfg_0002_149_036422060215.jpg", + "images/task_0058_user_0007_scene_0002_cfg_0002_163_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0058", + "split": "eval", + "split_group_id": "task_0058_user_0007_scene_0002_cfg_0002", + "recording_id": "task_0058_user_0007_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. 
No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0058_user_0007_scene_0002_cfg_0002_149_036422060215.jpg", + "images/task_0058_user_0007_scene_0002_cfg_0002_163_036422060215.jpg", + "images/task_0058_user_0007_scene_0002_cfg_0002_177_036422060215.jpg", + "images/task_0058_user_0007_scene_0002_cfg_0002_191_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0058", + "split": "eval", + "split_group_id": "task_0058_user_0007_scene_0002_cfg_0002", + "recording_id": "task_0058_user_0007_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_0058_user_0007_scene_0002_cfg_0002_136_036422060215.jpg", + "images/task_0058_user_0007_scene_0002_cfg_0002_122_036422060215.jpg", + "images/task_0058_user_0007_scene_0002_cfg_0002_109_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0058", + "split": "eval", + "split_group_id": "task_0058_user_0007_scene_0002_cfg_0002", + "recording_id": "task_0058_user_0007_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_0058_user_0007_scene_0002_cfg_0002_402_036422060215.jpg", + "images/task_0058_user_0007_scene_0002_cfg_0002_323_036422060215.jpg", + "images/task_0058_user_0007_scene_0002_cfg_0002_362_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0058", + "split": "eval", + "split_group_id": "task_0058_user_0007_scene_0002_cfg_0002", + "recording_id": "task_0058_user_0007_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0058_user_0007_scene_0008_cfg_0002_6_036422060215.jpg", + "images/task_0058_user_0007_scene_0008_cfg_0002_9_036422060215.jpg", + "images/task_0058_user_0007_scene_0008_cfg_0002_12_036422060215.jpg", + "images/task_0058_user_0007_scene_0008_cfg_0002_15_036422060215.jpg", + "images/task_0058_user_0007_scene_0008_cfg_0002_18_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0058", + "split": "eval", + "split_group_id": "task_0058_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0058_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. 
Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0058_user_0007_scene_0008_cfg_0002_24_036422060215.jpg", + "images/task_0058_user_0007_scene_0008_cfg_0002_27_036422060215.jpg", + "images/task_0058_user_0007_scene_0008_cfg_0002_30_036422060215.jpg", + "images/task_0058_user_0007_scene_0008_cfg_0002_33_036422060215.jpg", + "images/task_0058_user_0007_scene_0008_cfg_0002_36_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0058", + "split": "eval", + "split_group_id": "task_0058_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0058_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0058_user_0007_scene_0008_cfg_0002_240_036422060215.jpg", + "images/task_0058_user_0007_scene_0008_cfg_0002_243_036422060215.jpg", + "images/task_0058_user_0007_scene_0008_cfg_0002_246_036422060215.jpg", + "images/task_0058_user_0007_scene_0008_cfg_0002_249_036422060215.jpg", + "images/task_0058_user_0007_scene_0008_cfg_0002_252_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0058", + "split": "eval", + "split_group_id": "task_0058_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0058_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0058_user_0007_scene_0008_cfg_0002_315_036422060215.jpg", + "images/task_0058_user_0007_scene_0008_cfg_0002_318_036422060215.jpg", + "images/task_0058_user_0007_scene_0008_cfg_0002_321_036422060215.jpg", + "images/task_0058_user_0007_scene_0008_cfg_0002_324_036422060215.jpg", + "images/task_0058_user_0007_scene_0008_cfg_0002_327_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0058", + "split": "eval", + "split_group_id": "task_0058_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0058_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0058_user_0007_scene_0008_cfg_0002_378_036422060215.jpg", + "images/task_0058_user_0007_scene_0008_cfg_0002_381_036422060215.jpg", + "images/task_0058_user_0007_scene_0008_cfg_0002_384_036422060215.jpg", + "images/task_0058_user_0007_scene_0008_cfg_0002_387_036422060215.jpg", + "images/task_0058_user_0007_scene_0008_cfg_0002_390_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0058", + "split": "eval", + "split_group_id": "task_0058_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0058_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0058_user_0007_scene_0008_cfg_0002_8_036422060215.jpg", + "images/task_0058_user_0007_scene_0008_cfg_0002_21_036422060215.jpg", + "images/task_0058_user_0007_scene_0008_cfg_0002_34_036422060215.jpg", + "images/task_0058_user_0007_scene_0008_cfg_0002_47_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0058", + "split": "eval", + "split_group_id": "task_0058_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0058_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. 
Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0058_user_0007_scene_0008_cfg_0002_34_036422060215.jpg", + "images/task_0058_user_0007_scene_0008_cfg_0002_47_036422060215.jpg", + "images/task_0058_user_0007_scene_0008_cfg_0002_60_036422060215.jpg", + "images/task_0058_user_0007_scene_0008_cfg_0002_73_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0058", + "split": "eval", + "split_group_id": "task_0058_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0058_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0058_user_0007_scene_0008_cfg_0002_60_036422060215.jpg", + "images/task_0058_user_0007_scene_0008_cfg_0002_73_036422060215.jpg", + "images/task_0058_user_0007_scene_0008_cfg_0002_86_036422060215.jpg", + "images/task_0058_user_0007_scene_0008_cfg_0002_99_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0058", + "split": "eval", + "split_group_id": "task_0058_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0058_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. 
Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0058_user_0007_scene_0008_cfg_0002_86_036422060215.jpg", + "images/task_0058_user_0007_scene_0008_cfg_0002_99_036422060215.jpg", + "images/task_0058_user_0007_scene_0008_cfg_0002_112_036422060215.jpg", + "images/task_0058_user_0007_scene_0008_cfg_0002_125_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0058", + "split": "eval", + "split_group_id": "task_0058_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0058_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0058_user_0007_scene_0008_cfg_0002_112_036422060215.jpg", + "images/task_0058_user_0007_scene_0008_cfg_0002_125_036422060215.jpg", + "images/task_0058_user_0007_scene_0008_cfg_0002_138_036422060215.jpg", + "images/task_0058_user_0007_scene_0008_cfg_0002_151_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0058", + "split": "eval", + "split_group_id": "task_0058_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0058_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. 
Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0058_user_0007_scene_0008_cfg_0002_138_036422060215.jpg", + "images/task_0058_user_0007_scene_0008_cfg_0002_151_036422060215.jpg", + "images/task_0058_user_0007_scene_0008_cfg_0002_164_036422060215.jpg", + "images/task_0058_user_0007_scene_0008_cfg_0002_177_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0058", + "split": "eval", + "split_group_id": "task_0058_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0058_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_0058_user_0007_scene_0008_cfg_0002_375_036422060215.jpg", + "images/task_0058_user_0007_scene_0008_cfg_0002_278_036422060215.jpg", + "images/task_0058_user_0007_scene_0008_cfg_0002_326_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0058", + "split": "eval", + "split_group_id": "task_0058_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0058_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_0058_user_0007_scene_0008_cfg_0002_91_036422060215.jpg", + "images/task_0058_user_0007_scene_0008_cfg_0002_78_036422060215.jpg", + "images/task_0058_user_0007_scene_0008_cfg_0002_64_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0058", + "split": "eval", + "split_group_id": "task_0058_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0058_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0059_user_0007_scene_0001_cfg_0002_6_036422060215.jpg", + "images/task_0059_user_0007_scene_0001_cfg_0002_9_036422060215.jpg", + "images/task_0059_user_0007_scene_0001_cfg_0002_12_036422060215.jpg", + "images/task_0059_user_0007_scene_0001_cfg_0002_15_036422060215.jpg", + "images/task_0059_user_0007_scene_0001_cfg_0002_18_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0059", + "split": "eval", + "split_group_id": "task_0059_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0059_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. 
Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0059_user_0007_scene_0001_cfg_0002_42_036422060215.jpg", + "images/task_0059_user_0007_scene_0001_cfg_0002_45_036422060215.jpg", + "images/task_0059_user_0007_scene_0001_cfg_0002_48_036422060215.jpg", + "images/task_0059_user_0007_scene_0001_cfg_0002_51_036422060215.jpg", + "images/task_0059_user_0007_scene_0001_cfg_0002_54_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0059", + "split": "eval", + "split_group_id": "task_0059_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0059_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0059_user_0007_scene_0001_cfg_0002_79_036422060215.jpg", + "images/task_0059_user_0007_scene_0001_cfg_0002_82_036422060215.jpg", + "images/task_0059_user_0007_scene_0001_cfg_0002_85_036422060215.jpg", + "images/task_0059_user_0007_scene_0001_cfg_0002_88_036422060215.jpg", + "images/task_0059_user_0007_scene_0001_cfg_0002_91_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0059", + "split": "eval", + "split_group_id": "task_0059_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0059_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0059_user_0007_scene_0001_cfg_0002_97_036422060215.jpg", + "images/task_0059_user_0007_scene_0001_cfg_0002_100_036422060215.jpg", + "images/task_0059_user_0007_scene_0001_cfg_0002_103_036422060215.jpg", + "images/task_0059_user_0007_scene_0001_cfg_0002_106_036422060215.jpg", + "images/task_0059_user_0007_scene_0001_cfg_0002_109_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0059", + "split": "eval", + "split_group_id": "task_0059_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0059_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0059_user_0007_scene_0001_cfg_0002_415_036422060215.jpg", + "images/task_0059_user_0007_scene_0001_cfg_0002_418_036422060215.jpg", + "images/task_0059_user_0007_scene_0001_cfg_0002_421_036422060215.jpg", + "images/task_0059_user_0007_scene_0001_cfg_0002_424_036422060215.jpg", + "images/task_0059_user_0007_scene_0001_cfg_0002_427_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0059", + "split": "eval", + "split_group_id": "task_0059_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0059_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0059_user_0007_scene_0001_cfg_0002_541_036422060215.jpg", + "images/task_0059_user_0007_scene_0001_cfg_0002_544_036422060215.jpg", + "images/task_0059_user_0007_scene_0001_cfg_0002_547_036422060215.jpg", + "images/task_0059_user_0007_scene_0001_cfg_0002_550_036422060215.jpg", + "images/task_0059_user_0007_scene_0001_cfg_0002_553_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0059", + "split": "eval", + "split_group_id": "task_0059_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0059_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0059_user_0007_scene_0001_cfg_0002_14_036422060215.jpg", + "images/task_0059_user_0007_scene_0001_cfg_0002_34_036422060215.jpg", + "images/task_0059_user_0007_scene_0001_cfg_0002_54_036422060215.jpg", + "images/task_0059_user_0007_scene_0001_cfg_0002_74_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0059", + "split": "eval", + "split_group_id": "task_0059_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0059_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. 
Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0059_user_0007_scene_0001_cfg_0002_54_036422060215.jpg", + "images/task_0059_user_0007_scene_0001_cfg_0002_74_036422060215.jpg", + "images/task_0059_user_0007_scene_0001_cfg_0002_94_036422060215.jpg", + "images/task_0059_user_0007_scene_0001_cfg_0002_114_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0059", + "split": "eval", + "split_group_id": "task_0059_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0059_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0059_user_0007_scene_0001_cfg_0002_94_036422060215.jpg", + "images/task_0059_user_0007_scene_0001_cfg_0002_114_036422060215.jpg", + "images/task_0059_user_0007_scene_0001_cfg_0002_134_036422060215.jpg", + "images/task_0059_user_0007_scene_0001_cfg_0002_154_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0059", + "split": "eval", + "split_group_id": "task_0059_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0059_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. 
No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0059_user_0007_scene_0001_cfg_0002_134_036422060215.jpg", + "images/task_0059_user_0007_scene_0001_cfg_0002_154_036422060215.jpg", + "images/task_0059_user_0007_scene_0001_cfg_0002_174_036422060215.jpg", + "images/task_0059_user_0007_scene_0001_cfg_0002_194_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0059", + "split": "eval", + "split_group_id": "task_0059_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0059_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0059_user_0007_scene_0001_cfg_0002_174_036422060215.jpg", + "images/task_0059_user_0007_scene_0001_cfg_0002_194_036422060215.jpg", + "images/task_0059_user_0007_scene_0001_cfg_0002_214_036422060215.jpg", + "images/task_0059_user_0007_scene_0001_cfg_0002_234_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0059", + "split": "eval", + "split_group_id": "task_0059_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0059_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. 
Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0059_user_0007_scene_0001_cfg_0002_214_036422060215.jpg", + "images/task_0059_user_0007_scene_0001_cfg_0002_234_036422060215.jpg", + "images/task_0059_user_0007_scene_0001_cfg_0002_254_036422060215.jpg", + "images/task_0059_user_0007_scene_0001_cfg_0002_274_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0059", + "split": "eval", + "split_group_id": "task_0059_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0059_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_0059_user_0007_scene_0001_cfg_0002_492_036422060215.jpg", + "images/task_0059_user_0007_scene_0001_cfg_0002_533_036422060215.jpg", + "images/task_0059_user_0007_scene_0001_cfg_0002_452_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0059", + "split": "eval", + "split_group_id": "task_0059_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0059_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_0059_user_0007_scene_0001_cfg_0002_81_036422060215.jpg", + "images/task_0059_user_0007_scene_0001_cfg_0002_108_036422060215.jpg", + "images/task_0059_user_0007_scene_0001_cfg_0002_94_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0059", + "split": "eval", + "split_group_id": "task_0059_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0059_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0059_user_0007_scene_0003_cfg_0002_288_036422060215.jpg", + "images/task_0059_user_0007_scene_0003_cfg_0002_291_036422060215.jpg", + "images/task_0059_user_0007_scene_0003_cfg_0002_294_036422060215.jpg", + "images/task_0059_user_0007_scene_0003_cfg_0002_297_036422060215.jpg", + "images/task_0059_user_0007_scene_0003_cfg_0002_300_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0059", + "split": "eval", + "split_group_id": "task_0059_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0059_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. 
Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0059_user_0007_scene_0003_cfg_0002_324_036422060215.jpg", + "images/task_0059_user_0007_scene_0003_cfg_0002_327_036422060215.jpg", + "images/task_0059_user_0007_scene_0003_cfg_0002_330_036422060215.jpg", + "images/task_0059_user_0007_scene_0003_cfg_0002_333_036422060215.jpg", + "images/task_0059_user_0007_scene_0003_cfg_0002_336_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0059", + "split": "eval", + "split_group_id": "task_0059_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0059_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0059_user_0007_scene_0003_cfg_0002_356_036422060215.jpg", + "images/task_0059_user_0007_scene_0003_cfg_0002_359_036422060215.jpg", + "images/task_0059_user_0007_scene_0003_cfg_0002_362_036422060215.jpg", + "images/task_0059_user_0007_scene_0003_cfg_0002_365_036422060215.jpg", + "images/task_0059_user_0007_scene_0003_cfg_0002_368_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0059", + "split": "eval", + "split_group_id": "task_0059_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0059_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0059_user_0007_scene_0003_cfg_0002_464_036422060215.jpg", + "images/task_0059_user_0007_scene_0003_cfg_0002_467_036422060215.jpg", + "images/task_0059_user_0007_scene_0003_cfg_0002_470_036422060215.jpg", + "images/task_0059_user_0007_scene_0003_cfg_0002_473_036422060215.jpg", + "images/task_0059_user_0007_scene_0003_cfg_0002_476_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0059", + "split": "eval", + "split_group_id": "task_0059_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0059_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0059_user_0007_scene_0003_cfg_0002_12_036422060215.jpg", + "images/task_0059_user_0007_scene_0003_cfg_0002_30_036422060215.jpg", + "images/task_0059_user_0007_scene_0003_cfg_0002_48_036422060215.jpg", + "images/task_0059_user_0007_scene_0003_cfg_0002_66_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0059", + "split": "eval", + "split_group_id": "task_0059_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0059_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. 
No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0059_user_0007_scene_0003_cfg_0002_48_036422060215.jpg", + "images/task_0059_user_0007_scene_0003_cfg_0002_66_036422060215.jpg", + "images/task_0059_user_0007_scene_0003_cfg_0002_84_036422060215.jpg", + "images/task_0059_user_0007_scene_0003_cfg_0002_102_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0059", + "split": "eval", + "split_group_id": "task_0059_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0059_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0059_user_0007_scene_0003_cfg_0002_84_036422060215.jpg", + "images/task_0059_user_0007_scene_0003_cfg_0002_102_036422060215.jpg", + "images/task_0059_user_0007_scene_0003_cfg_0002_120_036422060215.jpg", + "images/task_0059_user_0007_scene_0003_cfg_0002_138_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0059", + "split": "eval", + "split_group_id": "task_0059_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0059_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. 
No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0059_user_0007_scene_0003_cfg_0002_120_036422060215.jpg", + "images/task_0059_user_0007_scene_0003_cfg_0002_138_036422060215.jpg", + "images/task_0059_user_0007_scene_0003_cfg_0002_156_036422060215.jpg", + "images/task_0059_user_0007_scene_0003_cfg_0002_174_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0059", + "split": "eval", + "split_group_id": "task_0059_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0059_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0059_user_0007_scene_0003_cfg_0002_156_036422060215.jpg", + "images/task_0059_user_0007_scene_0003_cfg_0002_174_036422060215.jpg", + "images/task_0059_user_0007_scene_0003_cfg_0002_192_036422060215.jpg", + "images/task_0059_user_0007_scene_0003_cfg_0002_210_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0059", + "split": "eval", + "split_group_id": "task_0059_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0059_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. 
Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0059_user_0007_scene_0003_cfg_0002_192_036422060215.jpg", + "images/task_0059_user_0007_scene_0003_cfg_0002_210_036422060215.jpg", + "images/task_0059_user_0007_scene_0003_cfg_0002_228_036422060215.jpg", + "images/task_0059_user_0007_scene_0003_cfg_0002_246_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0059", + "split": "eval", + "split_group_id": "task_0059_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0059_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_0059_user_0007_scene_0003_cfg_0002_392_036422060215.jpg", + "images/task_0059_user_0007_scene_0003_cfg_0002_432_036422060215.jpg", + "images/task_0059_user_0007_scene_0003_cfg_0002_472_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0059", + "split": "eval", + "split_group_id": "task_0059_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0059_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0059_user_0007_scene_0004_cfg_0002_382_036422060215.jpg", + "images/task_0059_user_0007_scene_0004_cfg_0002_385_036422060215.jpg", + "images/task_0059_user_0007_scene_0004_cfg_0002_388_036422060215.jpg", + "images/task_0059_user_0007_scene_0004_cfg_0002_391_036422060215.jpg", + "images/task_0059_user_0007_scene_0004_cfg_0002_394_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0059", + "split": "eval", + "split_group_id": "task_0059_user_0007_scene_0004_cfg_0002", + "recording_id": "task_0059_user_0007_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0059_user_0007_scene_0004_cfg_0002_418_036422060215.jpg", + "images/task_0059_user_0007_scene_0004_cfg_0002_421_036422060215.jpg", + "images/task_0059_user_0007_scene_0004_cfg_0002_424_036422060215.jpg", + "images/task_0059_user_0007_scene_0004_cfg_0002_427_036422060215.jpg", + "images/task_0059_user_0007_scene_0004_cfg_0002_430_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0059", + "split": "eval", + "split_group_id": "task_0059_user_0007_scene_0004_cfg_0002", + "recording_id": "task_0059_user_0007_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0059_user_0007_scene_0004_cfg_0002_450_036422060215.jpg", + "images/task_0059_user_0007_scene_0004_cfg_0002_453_036422060215.jpg", + "images/task_0059_user_0007_scene_0004_cfg_0002_456_036422060215.jpg", + "images/task_0059_user_0007_scene_0004_cfg_0002_459_036422060215.jpg", + "images/task_0059_user_0007_scene_0004_cfg_0002_462_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0059", + "split": "eval", + "split_group_id": "task_0059_user_0007_scene_0004_cfg_0002", + "recording_id": "task_0059_user_0007_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0059_user_0007_scene_0004_cfg_0002_576_036422060215.jpg", + "images/task_0059_user_0007_scene_0004_cfg_0002_579_036422060215.jpg", + "images/task_0059_user_0007_scene_0004_cfg_0002_582_036422060215.jpg", + "images/task_0059_user_0007_scene_0004_cfg_0002_585_036422060215.jpg", + "images/task_0059_user_0007_scene_0004_cfg_0002_588_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0059", + "split": "eval", + "split_group_id": "task_0059_user_0007_scene_0004_cfg_0002", + "recording_id": "task_0059_user_0007_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0059_user_0007_scene_0004_cfg_0002_14_036422060215.jpg", + "images/task_0059_user_0007_scene_0004_cfg_0002_35_036422060215.jpg", + "images/task_0059_user_0007_scene_0004_cfg_0002_56_036422060215.jpg", + "images/task_0059_user_0007_scene_0004_cfg_0002_77_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0059", + "split": "eval", + "split_group_id": "task_0059_user_0007_scene_0004_cfg_0002", + "recording_id": "task_0059_user_0007_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. 
Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0059_user_0007_scene_0004_cfg_0002_56_036422060215.jpg", + "images/task_0059_user_0007_scene_0004_cfg_0002_77_036422060215.jpg", + "images/task_0059_user_0007_scene_0004_cfg_0002_98_036422060215.jpg", + "images/task_0059_user_0007_scene_0004_cfg_0002_119_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0059", + "split": "eval", + "split_group_id": "task_0059_user_0007_scene_0004_cfg_0002", + "recording_id": "task_0059_user_0007_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0059_user_0007_scene_0004_cfg_0002_98_036422060215.jpg", + "images/task_0059_user_0007_scene_0004_cfg_0002_119_036422060215.jpg", + "images/task_0059_user_0007_scene_0004_cfg_0002_140_036422060215.jpg", + "images/task_0059_user_0007_scene_0004_cfg_0002_161_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0059", + "split": "eval", + "split_group_id": "task_0059_user_0007_scene_0004_cfg_0002", + "recording_id": "task_0059_user_0007_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. 
No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0059_user_0007_scene_0004_cfg_0002_140_036422060215.jpg", + "images/task_0059_user_0007_scene_0004_cfg_0002_161_036422060215.jpg", + "images/task_0059_user_0007_scene_0004_cfg_0002_182_036422060215.jpg", + "images/task_0059_user_0007_scene_0004_cfg_0002_203_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0059", + "split": "eval", + "split_group_id": "task_0059_user_0007_scene_0004_cfg_0002", + "recording_id": "task_0059_user_0007_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0059_user_0007_scene_0004_cfg_0002_182_036422060215.jpg", + "images/task_0059_user_0007_scene_0004_cfg_0002_203_036422060215.jpg", + "images/task_0059_user_0007_scene_0004_cfg_0002_224_036422060215.jpg", + "images/task_0059_user_0007_scene_0004_cfg_0002_245_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0059", + "split": "eval", + "split_group_id": "task_0059_user_0007_scene_0004_cfg_0002", + "recording_id": "task_0059_user_0007_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. 
Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0059_user_0007_scene_0004_cfg_0002_224_036422060215.jpg", + "images/task_0059_user_0007_scene_0004_cfg_0002_245_036422060215.jpg", + "images/task_0059_user_0007_scene_0004_cfg_0002_266_036422060215.jpg", + "images/task_0059_user_0007_scene_0004_cfg_0002_287_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0059", + "split": "eval", + "split_group_id": "task_0059_user_0007_scene_0004_cfg_0002", + "recording_id": "task_0059_user_0007_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_0059_user_0007_scene_0004_cfg_0002_486_036422060215.jpg", + "images/task_0059_user_0007_scene_0004_cfg_0002_525_036422060215.jpg", + "images/task_0059_user_0007_scene_0004_cfg_0002_564_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0059", + "split": "eval", + "split_group_id": "task_0059_user_0007_scene_0004_cfg_0002", + "recording_id": "task_0059_user_0007_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0060_user_0007_scene_0004_cfg_0002_8_036422060215.jpg", + "images/task_0060_user_0007_scene_0004_cfg_0002_20_036422060215.jpg", + "images/task_0060_user_0007_scene_0004_cfg_0002_32_036422060215.jpg", + "images/task_0060_user_0007_scene_0004_cfg_0002_44_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0060", + "split": "eval", + "split_group_id": "task_0060_user_0007_scene_0004_cfg_0002", + "recording_id": "task_0060_user_0007_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0060_user_0007_scene_0004_cfg_0002_20_036422060215.jpg", + "images/task_0060_user_0007_scene_0004_cfg_0002_32_036422060215.jpg", + "images/task_0060_user_0007_scene_0004_cfg_0002_44_036422060215.jpg", + "images/task_0060_user_0007_scene_0004_cfg_0002_56_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0060", + "split": "eval", + "split_group_id": "task_0060_user_0007_scene_0004_cfg_0002", + "recording_id": "task_0060_user_0007_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0060_user_0007_scene_0004_cfg_0002_32_036422060215.jpg", + "images/task_0060_user_0007_scene_0004_cfg_0002_44_036422060215.jpg", + "images/task_0060_user_0007_scene_0004_cfg_0002_56_036422060215.jpg", + "images/task_0060_user_0007_scene_0004_cfg_0002_68_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0060", + "split": "eval", + "split_group_id": "task_0060_user_0007_scene_0004_cfg_0002", + "recording_id": "task_0060_user_0007_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0060_user_0007_scene_0004_cfg_0002_44_036422060215.jpg", + "images/task_0060_user_0007_scene_0004_cfg_0002_56_036422060215.jpg", + "images/task_0060_user_0007_scene_0004_cfg_0002_68_036422060215.jpg", + "images/task_0060_user_0007_scene_0004_cfg_0002_80_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0060", + "split": "eval", + "split_group_id": "task_0060_user_0007_scene_0004_cfg_0002", + "recording_id": "task_0060_user_0007_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0060_user_0007_scene_0004_cfg_0002_56_036422060215.jpg", + "images/task_0060_user_0007_scene_0004_cfg_0002_68_036422060215.jpg", + "images/task_0060_user_0007_scene_0004_cfg_0002_80_036422060215.jpg", + "images/task_0060_user_0007_scene_0004_cfg_0002_92_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0060", + "split": "eval", + "split_group_id": "task_0060_user_0007_scene_0004_cfg_0002", + "recording_id": "task_0060_user_0007_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0060_user_0007_scene_0004_cfg_0002_68_036422060215.jpg", + "images/task_0060_user_0007_scene_0004_cfg_0002_80_036422060215.jpg", + "images/task_0060_user_0007_scene_0004_cfg_0002_92_036422060215.jpg", + "images/task_0060_user_0007_scene_0004_cfg_0002_104_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0060", + "split": "eval", + "split_group_id": "task_0060_user_0007_scene_0004_cfg_0002", + "recording_id": "task_0060_user_0007_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0060_user_0007_scene_0004_cfg_0002_80_036422060215.jpg", + "images/task_0060_user_0007_scene_0004_cfg_0002_92_036422060215.jpg", + "images/task_0060_user_0007_scene_0004_cfg_0002_104_036422060215.jpg", + "images/task_0060_user_0007_scene_0004_cfg_0002_116_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0060", + "split": "eval", + "split_group_id": "task_0060_user_0007_scene_0004_cfg_0002", + "recording_id": "task_0060_user_0007_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0060_user_0007_scene_0004_cfg_0002_92_036422060215.jpg", + "images/task_0060_user_0007_scene_0004_cfg_0002_104_036422060215.jpg", + "images/task_0060_user_0007_scene_0004_cfg_0002_116_036422060215.jpg", + "images/task_0060_user_0007_scene_0004_cfg_0002_128_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0060", + "split": "eval", + "split_group_id": "task_0060_user_0007_scene_0004_cfg_0002", + "recording_id": "task_0060_user_0007_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0060_user_0007_scene_0004_cfg_0002_104_036422060215.jpg", + "images/task_0060_user_0007_scene_0004_cfg_0002_116_036422060215.jpg", + "images/task_0060_user_0007_scene_0004_cfg_0002_128_036422060215.jpg", + "images/task_0060_user_0007_scene_0004_cfg_0002_140_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0060", + "split": "eval", + "split_group_id": "task_0060_user_0007_scene_0004_cfg_0002", + "recording_id": "task_0060_user_0007_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0060_user_0007_scene_0004_cfg_0002_116_036422060215.jpg", + "images/task_0060_user_0007_scene_0004_cfg_0002_128_036422060215.jpg", + "images/task_0060_user_0007_scene_0004_cfg_0002_140_036422060215.jpg", + "images/task_0060_user_0007_scene_0004_cfg_0002_152_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0060", + "split": "eval", + "split_group_id": "task_0060_user_0007_scene_0004_cfg_0002", + "recording_id": "task_0060_user_0007_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "A. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0061_user_0007_scene_0001_cfg_0002_152_036422060215.jpg", + "images/task_0061_user_0007_scene_0001_cfg_0002_155_036422060215.jpg", + "images/task_0061_user_0007_scene_0001_cfg_0002_158_036422060215.jpg", + "images/task_0061_user_0007_scene_0001_cfg_0002_161_036422060215.jpg", + "images/task_0061_user_0007_scene_0001_cfg_0002_164_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0061", + "split": "eval", + "split_group_id": "task_0061_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0061_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0061_user_0007_scene_0001_cfg_0002_188_036422060215.jpg", + "images/task_0061_user_0007_scene_0001_cfg_0002_191_036422060215.jpg", + "images/task_0061_user_0007_scene_0001_cfg_0002_194_036422060215.jpg", + "images/task_0061_user_0007_scene_0001_cfg_0002_197_036422060215.jpg", + "images/task_0061_user_0007_scene_0001_cfg_0002_200_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0061", + "split": "eval", + "split_group_id": "task_0061_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0061_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0061_user_0007_scene_0001_cfg_0002_238_036422060215.jpg", + "images/task_0061_user_0007_scene_0001_cfg_0002_241_036422060215.jpg", + "images/task_0061_user_0007_scene_0001_cfg_0002_244_036422060215.jpg", + "images/task_0061_user_0007_scene_0001_cfg_0002_247_036422060215.jpg", + "images/task_0061_user_0007_scene_0001_cfg_0002_250_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0061", + "split": "eval", + "split_group_id": "task_0061_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0061_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0061_user_0007_scene_0001_cfg_0002_263_036422060215.jpg", + "images/task_0061_user_0007_scene_0001_cfg_0002_266_036422060215.jpg", + "images/task_0061_user_0007_scene_0001_cfg_0002_269_036422060215.jpg", + "images/task_0061_user_0007_scene_0001_cfg_0002_272_036422060215.jpg", + "images/task_0061_user_0007_scene_0001_cfg_0002_275_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0061", + "split": "eval", + "split_group_id": "task_0061_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0061_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0061_user_0007_scene_0001_cfg_0002_352_036422060215.jpg", + "images/task_0061_user_0007_scene_0001_cfg_0002_355_036422060215.jpg", + "images/task_0061_user_0007_scene_0001_cfg_0002_358_036422060215.jpg", + "images/task_0061_user_0007_scene_0001_cfg_0002_361_036422060215.jpg", + "images/task_0061_user_0007_scene_0001_cfg_0002_364_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0061", + "split": "eval", + "split_group_id": "task_0061_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0061_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0061_user_0007_scene_0001_cfg_0002_508_036422060215.jpg", + "images/task_0061_user_0007_scene_0001_cfg_0002_511_036422060215.jpg", + "images/task_0061_user_0007_scene_0001_cfg_0002_514_036422060215.jpg", + "images/task_0061_user_0007_scene_0001_cfg_0002_517_036422060215.jpg", + "images/task_0061_user_0007_scene_0001_cfg_0002_520_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0061", + "split": "eval", + "split_group_id": "task_0061_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0061_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0061_user_0007_scene_0001_cfg_0002_13_036422060215.jpg", + "images/task_0061_user_0007_scene_0001_cfg_0002_32_036422060215.jpg", + "images/task_0061_user_0007_scene_0001_cfg_0002_51_036422060215.jpg", + "images/task_0061_user_0007_scene_0001_cfg_0002_70_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0061", + "split": "eval", + "split_group_id": "task_0061_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0061_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. 
No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0061_user_0007_scene_0001_cfg_0002_51_036422060215.jpg", + "images/task_0061_user_0007_scene_0001_cfg_0002_70_036422060215.jpg", + "images/task_0061_user_0007_scene_0001_cfg_0002_89_036422060215.jpg", + "images/task_0061_user_0007_scene_0001_cfg_0002_108_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0061", + "split": "eval", + "split_group_id": "task_0061_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0061_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0061_user_0007_scene_0001_cfg_0002_89_036422060215.jpg", + "images/task_0061_user_0007_scene_0001_cfg_0002_108_036422060215.jpg", + "images/task_0061_user_0007_scene_0001_cfg_0002_127_036422060215.jpg", + "images/task_0061_user_0007_scene_0001_cfg_0002_146_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0061", + "split": "eval", + "split_group_id": "task_0061_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0061_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. 
Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0061_user_0007_scene_0001_cfg_0002_127_036422060215.jpg", + "images/task_0061_user_0007_scene_0001_cfg_0002_146_036422060215.jpg", + "images/task_0061_user_0007_scene_0001_cfg_0002_165_036422060215.jpg", + "images/task_0061_user_0007_scene_0001_cfg_0002_184_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0061", + "split": "eval", + "split_group_id": "task_0061_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0061_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0061_user_0007_scene_0001_cfg_0002_165_036422060215.jpg", + "images/task_0061_user_0007_scene_0001_cfg_0002_184_036422060215.jpg", + "images/task_0061_user_0007_scene_0001_cfg_0002_203_036422060215.jpg", + "images/task_0061_user_0007_scene_0001_cfg_0002_222_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0061", + "split": "eval", + "split_group_id": "task_0061_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0061_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. 
Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0061_user_0007_scene_0001_cfg_0002_203_036422060215.jpg", + "images/task_0061_user_0007_scene_0001_cfg_0002_222_036422060215.jpg", + "images/task_0061_user_0007_scene_0001_cfg_0002_241_036422060215.jpg", + "images/task_0061_user_0007_scene_0001_cfg_0002_260_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0061", + "split": "eval", + "split_group_id": "task_0061_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0061_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_0061_user_0007_scene_0001_cfg_0002_304_036422060215.jpg", + "images/task_0061_user_0007_scene_0001_cfg_0002_347_036422060215.jpg", + "images/task_0061_user_0007_scene_0001_cfg_0002_260_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0061", + "split": "eval", + "split_group_id": "task_0061_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0061_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_0061_user_0007_scene_0001_cfg_0002_492_036422060215.jpg", + "images/task_0061_user_0007_scene_0001_cfg_0002_521_036422060215.jpg", + "images/task_0061_user_0007_scene_0001_cfg_0002_462_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0061", + "split": "eval", + "split_group_id": "task_0061_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0061_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0064_user_0007_scene_0005_cfg_0002_17_036422060215.jpg", + "images/task_0064_user_0007_scene_0005_cfg_0002_20_036422060215.jpg", + "images/task_0064_user_0007_scene_0005_cfg_0002_23_036422060215.jpg", + "images/task_0064_user_0007_scene_0005_cfg_0002_26_036422060215.jpg", + "images/task_0064_user_0007_scene_0005_cfg_0002_29_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0064", + "split": "eval", + "split_group_id": "task_0064_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0064_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. 
Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0064_user_0007_scene_0005_cfg_0002_53_036422060215.jpg", + "images/task_0064_user_0007_scene_0005_cfg_0002_56_036422060215.jpg", + "images/task_0064_user_0007_scene_0005_cfg_0002_59_036422060215.jpg", + "images/task_0064_user_0007_scene_0005_cfg_0002_62_036422060215.jpg", + "images/task_0064_user_0007_scene_0005_cfg_0002_65_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0064", + "split": "eval", + "split_group_id": "task_0064_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0064_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0064_user_0007_scene_0005_cfg_0002_188_036422060215.jpg", + "images/task_0064_user_0007_scene_0005_cfg_0002_191_036422060215.jpg", + "images/task_0064_user_0007_scene_0005_cfg_0002_194_036422060215.jpg", + "images/task_0064_user_0007_scene_0005_cfg_0002_197_036422060215.jpg", + "images/task_0064_user_0007_scene_0005_cfg_0002_200_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0064", + "split": "eval", + "split_group_id": "task_0064_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0064_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0064_user_0007_scene_0005_cfg_0002_228_036422060215.jpg", + "images/task_0064_user_0007_scene_0005_cfg_0002_231_036422060215.jpg", + "images/task_0064_user_0007_scene_0005_cfg_0002_234_036422060215.jpg", + "images/task_0064_user_0007_scene_0005_cfg_0002_237_036422060215.jpg", + "images/task_0064_user_0007_scene_0005_cfg_0002_240_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0064", + "split": "eval", + "split_group_id": "task_0064_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0064_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0064_user_0007_scene_0005_cfg_0002_254_036422060215.jpg", + "images/task_0064_user_0007_scene_0005_cfg_0002_257_036422060215.jpg", + "images/task_0064_user_0007_scene_0005_cfg_0002_260_036422060215.jpg", + "images/task_0064_user_0007_scene_0005_cfg_0002_263_036422060215.jpg", + "images/task_0064_user_0007_scene_0005_cfg_0002_266_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0064", + "split": "eval", + "split_group_id": "task_0064_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0064_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0064_user_0007_scene_0005_cfg_0002_6_036422060215.jpg", + "images/task_0064_user_0007_scene_0005_cfg_0002_15_036422060215.jpg", + "images/task_0064_user_0007_scene_0005_cfg_0002_24_036422060215.jpg", + "images/task_0064_user_0007_scene_0005_cfg_0002_33_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0064", + "split": "eval", + "split_group_id": "task_0064_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0064_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. 
No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0064_user_0007_scene_0005_cfg_0002_24_036422060215.jpg", + "images/task_0064_user_0007_scene_0005_cfg_0002_33_036422060215.jpg", + "images/task_0064_user_0007_scene_0005_cfg_0002_42_036422060215.jpg", + "images/task_0064_user_0007_scene_0005_cfg_0002_51_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0064", + "split": "eval", + "split_group_id": "task_0064_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0064_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0064_user_0007_scene_0005_cfg_0002_42_036422060215.jpg", + "images/task_0064_user_0007_scene_0005_cfg_0002_51_036422060215.jpg", + "images/task_0064_user_0007_scene_0005_cfg_0002_60_036422060215.jpg", + "images/task_0064_user_0007_scene_0005_cfg_0002_69_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0064", + "split": "eval", + "split_group_id": "task_0064_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0064_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. 
No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0064_user_0007_scene_0005_cfg_0002_60_036422060215.jpg", + "images/task_0064_user_0007_scene_0005_cfg_0002_69_036422060215.jpg", + "images/task_0064_user_0007_scene_0005_cfg_0002_78_036422060215.jpg", + "images/task_0064_user_0007_scene_0005_cfg_0002_87_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0064", + "split": "eval", + "split_group_id": "task_0064_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0064_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0064_user_0007_scene_0005_cfg_0002_78_036422060215.jpg", + "images/task_0064_user_0007_scene_0005_cfg_0002_87_036422060215.jpg", + "images/task_0064_user_0007_scene_0005_cfg_0002_96_036422060215.jpg", + "images/task_0064_user_0007_scene_0005_cfg_0002_105_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0064", + "split": "eval", + "split_group_id": "task_0064_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0064_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. 
No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0064_user_0007_scene_0005_cfg_0002_96_036422060215.jpg", + "images/task_0064_user_0007_scene_0005_cfg_0002_105_036422060215.jpg", + "images/task_0064_user_0007_scene_0005_cfg_0002_114_036422060215.jpg", + "images/task_0064_user_0007_scene_0005_cfg_0002_123_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0064", + "split": "eval", + "split_group_id": "task_0064_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0064_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_0064_user_0007_scene_0005_cfg_0002_128_036422060215.jpg", + "images/task_0064_user_0007_scene_0005_cfg_0002_175_036422060215.jpg", + "images/task_0064_user_0007_scene_0005_cfg_0002_222_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0064", + "split": "eval", + "split_group_id": "task_0064_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0064_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0064_user_0007_scene_0010_cfg_0002_34_036422060215.jpg", + "images/task_0064_user_0007_scene_0010_cfg_0002_37_036422060215.jpg", + "images/task_0064_user_0007_scene_0010_cfg_0002_40_036422060215.jpg", + "images/task_0064_user_0007_scene_0010_cfg_0002_43_036422060215.jpg", + "images/task_0064_user_0007_scene_0010_cfg_0002_46_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0064", + "split": "eval", + "split_group_id": "task_0064_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0064_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0064_user_0007_scene_0010_cfg_0002_70_036422060215.jpg", + "images/task_0064_user_0007_scene_0010_cfg_0002_73_036422060215.jpg", + "images/task_0064_user_0007_scene_0010_cfg_0002_76_036422060215.jpg", + "images/task_0064_user_0007_scene_0010_cfg_0002_79_036422060215.jpg", + "images/task_0064_user_0007_scene_0010_cfg_0002_82_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0064", + "split": "eval", + "split_group_id": "task_0064_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0064_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0064_user_0007_scene_0010_cfg_0002_94_036422060215.jpg", + "images/task_0064_user_0007_scene_0010_cfg_0002_97_036422060215.jpg", + "images/task_0064_user_0007_scene_0010_cfg_0002_100_036422060215.jpg", + "images/task_0064_user_0007_scene_0010_cfg_0002_103_036422060215.jpg", + "images/task_0064_user_0007_scene_0010_cfg_0002_106_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0064", + "split": "eval", + "split_group_id": "task_0064_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0064_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0064_user_0007_scene_0010_cfg_0002_325_036422060215.jpg", + "images/task_0064_user_0007_scene_0010_cfg_0002_328_036422060215.jpg", + "images/task_0064_user_0007_scene_0010_cfg_0002_331_036422060215.jpg", + "images/task_0064_user_0007_scene_0010_cfg_0002_334_036422060215.jpg", + "images/task_0064_user_0007_scene_0010_cfg_0002_337_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0064", + "split": "eval", + "split_group_id": "task_0064_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0064_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0064_user_0007_scene_0010_cfg_0002_352_036422060215.jpg", + "images/task_0064_user_0007_scene_0010_cfg_0002_355_036422060215.jpg", + "images/task_0064_user_0007_scene_0010_cfg_0002_358_036422060215.jpg", + "images/task_0064_user_0007_scene_0010_cfg_0002_361_036422060215.jpg", + "images/task_0064_user_0007_scene_0010_cfg_0002_364_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0064", + "split": "eval", + "split_group_id": "task_0064_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0064_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0064_user_0007_scene_0010_cfg_0002_388_036422060215.jpg", + "images/task_0064_user_0007_scene_0010_cfg_0002_391_036422060215.jpg", + "images/task_0064_user_0007_scene_0010_cfg_0002_394_036422060215.jpg", + "images/task_0064_user_0007_scene_0010_cfg_0002_397_036422060215.jpg", + "images/task_0064_user_0007_scene_0010_cfg_0002_400_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0064", + "split": "eval", + "split_group_id": "task_0064_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0064_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0064_user_0007_scene_0010_cfg_0002_411_036422060215.jpg", + "images/task_0064_user_0007_scene_0010_cfg_0002_414_036422060215.jpg", + "images/task_0064_user_0007_scene_0010_cfg_0002_417_036422060215.jpg", + "images/task_0064_user_0007_scene_0010_cfg_0002_420_036422060215.jpg", + "images/task_0064_user_0007_scene_0010_cfg_0002_423_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0064", + "split": "eval", + "split_group_id": "task_0064_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0064_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0064_user_0007_scene_0010_cfg_0002_10_036422060215.jpg", + "images/task_0064_user_0007_scene_0010_cfg_0002_25_036422060215.jpg", + "images/task_0064_user_0007_scene_0010_cfg_0002_40_036422060215.jpg", + "images/task_0064_user_0007_scene_0010_cfg_0002_55_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0064", + "split": "eval", + "split_group_id": "task_0064_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0064_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. 
Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0064_user_0007_scene_0010_cfg_0002_40_036422060215.jpg", + "images/task_0064_user_0007_scene_0010_cfg_0002_55_036422060215.jpg", + "images/task_0064_user_0007_scene_0010_cfg_0002_70_036422060215.jpg", + "images/task_0064_user_0007_scene_0010_cfg_0002_85_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0064", + "split": "eval", + "split_group_id": "task_0064_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0064_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0064_user_0007_scene_0010_cfg_0002_70_036422060215.jpg", + "images/task_0064_user_0007_scene_0010_cfg_0002_85_036422060215.jpg", + "images/task_0064_user_0007_scene_0010_cfg_0002_100_036422060215.jpg", + "images/task_0064_user_0007_scene_0010_cfg_0002_115_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0064", + "split": "eval", + "split_group_id": "task_0064_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0064_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. 
Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0064_user_0007_scene_0010_cfg_0002_100_036422060215.jpg", + "images/task_0064_user_0007_scene_0010_cfg_0002_115_036422060215.jpg", + "images/task_0064_user_0007_scene_0010_cfg_0002_130_036422060215.jpg", + "images/task_0064_user_0007_scene_0010_cfg_0002_145_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0064", + "split": "eval", + "split_group_id": "task_0064_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0064_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0064_user_0007_scene_0010_cfg_0002_130_036422060215.jpg", + "images/task_0064_user_0007_scene_0010_cfg_0002_145_036422060215.jpg", + "images/task_0064_user_0007_scene_0010_cfg_0002_160_036422060215.jpg", + "images/task_0064_user_0007_scene_0010_cfg_0002_175_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0064", + "split": "eval", + "split_group_id": "task_0064_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0064_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. 
Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0064_user_0007_scene_0010_cfg_0002_160_036422060215.jpg", + "images/task_0064_user_0007_scene_0010_cfg_0002_175_036422060215.jpg", + "images/task_0064_user_0007_scene_0010_cfg_0002_190_036422060215.jpg", + "images/task_0064_user_0007_scene_0010_cfg_0002_205_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0064", + "split": "eval", + "split_group_id": "task_0064_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0064_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_0064_user_0007_scene_0010_cfg_0002_409_036422060215.jpg", + "images/task_0064_user_0007_scene_0010_cfg_0002_380_036422060215.jpg", + "images/task_0064_user_0007_scene_0010_cfg_0002_351_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0064", + "split": "eval", + "split_group_id": "task_0064_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0064_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_0064_user_0007_scene_0010_cfg_0002_109_036422060215.jpg", + "images/task_0064_user_0007_scene_0010_cfg_0002_118_036422060215.jpg", + "images/task_0064_user_0007_scene_0010_cfg_0002_128_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0064", + "split": "eval", + "split_group_id": "task_0064_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0064_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0065_user_0007_scene_0018_cfg_0002_7_036422060215.jpg", + "images/task_0065_user_0007_scene_0018_cfg_0002_10_036422060215.jpg", + "images/task_0065_user_0007_scene_0018_cfg_0002_13_036422060215.jpg", + "images/task_0065_user_0007_scene_0018_cfg_0002_16_036422060215.jpg", + "images/task_0065_user_0007_scene_0018_cfg_0002_19_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0065", + "split": "eval", + "split_group_id": "task_0065_user_0007_scene_0018_cfg_0002", + "recording_id": "task_0065_user_0007_scene_0018_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. 
Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0065_user_0007_scene_0018_cfg_0002_25_036422060215.jpg", + "images/task_0065_user_0007_scene_0018_cfg_0002_28_036422060215.jpg", + "images/task_0065_user_0007_scene_0018_cfg_0002_31_036422060215.jpg", + "images/task_0065_user_0007_scene_0018_cfg_0002_34_036422060215.jpg", + "images/task_0065_user_0007_scene_0018_cfg_0002_37_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0065", + "split": "eval", + "split_group_id": "task_0065_user_0007_scene_0018_cfg_0002", + "recording_id": "task_0065_user_0007_scene_0018_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0065_user_0007_scene_0018_cfg_0002_44_036422060215.jpg", + "images/task_0065_user_0007_scene_0018_cfg_0002_47_036422060215.jpg", + "images/task_0065_user_0007_scene_0018_cfg_0002_50_036422060215.jpg", + "images/task_0065_user_0007_scene_0018_cfg_0002_53_036422060215.jpg", + "images/task_0065_user_0007_scene_0018_cfg_0002_56_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0065", + "split": "eval", + "split_group_id": "task_0065_user_0007_scene_0018_cfg_0002", + "recording_id": "task_0065_user_0007_scene_0018_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0065_user_0007_scene_0018_cfg_0002_2661_036422060215.jpg", + "images/task_0065_user_0007_scene_0018_cfg_0002_2664_036422060215.jpg", + "images/task_0065_user_0007_scene_0018_cfg_0002_2667_036422060215.jpg", + "images/task_0065_user_0007_scene_0018_cfg_0002_2670_036422060215.jpg", + "images/task_0065_user_0007_scene_0018_cfg_0002_2673_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0065", + "split": "eval", + "split_group_id": "task_0065_user_0007_scene_0018_cfg_0002", + "recording_id": "task_0065_user_0007_scene_0018_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0065_user_0007_scene_0018_cfg_0002_2686_036422060215.jpg", + "images/task_0065_user_0007_scene_0018_cfg_0002_2689_036422060215.jpg", + "images/task_0065_user_0007_scene_0018_cfg_0002_2692_036422060215.jpg", + "images/task_0065_user_0007_scene_0018_cfg_0002_2695_036422060215.jpg", + "images/task_0065_user_0007_scene_0018_cfg_0002_2698_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0065", + "split": "eval", + "split_group_id": "task_0065_user_0007_scene_0018_cfg_0002", + "recording_id": "task_0065_user_0007_scene_0018_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0065_user_0007_scene_0018_cfg_0002_54_036422060215.jpg", + "images/task_0065_user_0007_scene_0018_cfg_0002_84_036422060215.jpg", + "images/task_0065_user_0007_scene_0018_cfg_0002_114_036422060215.jpg", + "images/task_0065_user_0007_scene_0018_cfg_0002_144_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0065", + "split": "eval", + "split_group_id": "task_0065_user_0007_scene_0018_cfg_0002", + "recording_id": "task_0065_user_0007_scene_0018_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. 
Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0065_user_0007_scene_0018_cfg_0002_218_036422060215.jpg", + "images/task_0065_user_0007_scene_0018_cfg_0002_248_036422060215.jpg", + "images/task_0065_user_0007_scene_0018_cfg_0002_278_036422060215.jpg", + "images/task_0065_user_0007_scene_0018_cfg_0002_308_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0065", + "split": "eval", + "split_group_id": "task_0065_user_0007_scene_0018_cfg_0002", + "recording_id": "task_0065_user_0007_scene_0018_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0065_user_0007_scene_0018_cfg_0002_464_036422060215.jpg", + "images/task_0065_user_0007_scene_0018_cfg_0002_494_036422060215.jpg", + "images/task_0065_user_0007_scene_0018_cfg_0002_524_036422060215.jpg", + "images/task_0065_user_0007_scene_0018_cfg_0002_554_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0065", + "split": "eval", + "split_group_id": "task_0065_user_0007_scene_0018_cfg_0002", + "recording_id": "task_0065_user_0007_scene_0018_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. 
Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0065_user_0007_scene_0018_cfg_0002_628_036422060215.jpg", + "images/task_0065_user_0007_scene_0018_cfg_0002_658_036422060215.jpg", + "images/task_0065_user_0007_scene_0018_cfg_0002_688_036422060215.jpg", + "images/task_0065_user_0007_scene_0018_cfg_0002_718_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0065", + "split": "eval", + "split_group_id": "task_0065_user_0007_scene_0018_cfg_0002", + "recording_id": "task_0065_user_0007_scene_0018_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0065_user_0007_scene_0018_cfg_0002_874_036422060215.jpg", + "images/task_0065_user_0007_scene_0018_cfg_0002_904_036422060215.jpg", + "images/task_0065_user_0007_scene_0018_cfg_0002_934_036422060215.jpg", + "images/task_0065_user_0007_scene_0018_cfg_0002_964_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0065", + "split": "eval", + "split_group_id": "task_0065_user_0007_scene_0018_cfg_0002", + "recording_id": "task_0065_user_0007_scene_0018_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. 
Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0065_user_0007_scene_0018_cfg_0002_1038_036422060215.jpg", + "images/task_0065_user_0007_scene_0018_cfg_0002_1068_036422060215.jpg", + "images/task_0065_user_0007_scene_0018_cfg_0002_1098_036422060215.jpg", + "images/task_0065_user_0007_scene_0018_cfg_0002_1128_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0065", + "split": "eval", + "split_group_id": "task_0065_user_0007_scene_0018_cfg_0002", + "recording_id": "task_0065_user_0007_scene_0018_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_0065_user_0007_scene_0018_cfg_0002_2662_036422060215.jpg", + "images/task_0065_user_0007_scene_0018_cfg_0002_2688_036422060215.jpg", + "images/task_0065_user_0007_scene_0018_cfg_0002_2675_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0065", + "split": "eval", + "split_group_id": "task_0065_user_0007_scene_0018_cfg_0002", + "recording_id": "task_0065_user_0007_scene_0018_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_0065_user_0007_scene_0018_cfg_0002_540_036422060215.jpg", + "images/task_0065_user_0007_scene_0018_cfg_0002_492_036422060215.jpg", + "images/task_0065_user_0007_scene_0018_cfg_0002_516_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0065", + "split": "eval", + "split_group_id": "task_0065_user_0007_scene_0018_cfg_0002", + "recording_id": "task_0065_user_0007_scene_0018_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_0065_user_0007_scene_0018_cfg_0002_76_036422060215.jpg", + "images/task_0065_user_0007_scene_0018_cfg_0002_63_036422060215.jpg", + "images/task_0065_user_0007_scene_0018_cfg_0002_90_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0065", + "split": "eval", + "split_group_id": "task_0065_user_0007_scene_0018_cfg_0002", + "recording_id": "task_0065_user_0007_scene_0018_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_0065_user_0007_scene_0018_cfg_0002_1251_036422060215.jpg", + "images/task_0065_user_0007_scene_0018_cfg_0002_1235_036422060215.jpg", + "images/task_0065_user_0007_scene_0018_cfg_0002_1243_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0065", + "split": "eval", + "split_group_id": "task_0065_user_0007_scene_0018_cfg_0002", + "recording_id": "task_0065_user_0007_scene_0018_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_0065_user_0007_scene_0018_cfg_0002_378_036422060215.jpg", + "images/task_0065_user_0007_scene_0018_cfg_0002_408_036422060215.jpg", + "images/task_0065_user_0007_scene_0018_cfg_0002_349_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0065", + "split": "eval", + "split_group_id": "task_0065_user_0007_scene_0018_cfg_0002", + "recording_id": "task_0065_user_0007_scene_0018_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_0065_user_0007_scene_0018_cfg_0002_2416_036422060215.jpg", + "images/task_0065_user_0007_scene_0018_cfg_0002_2390_036422060215.jpg", + "images/task_0065_user_0007_scene_0018_cfg_0002_2403_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0065", + "split": "eval", + "split_group_id": "task_0065_user_0007_scene_0018_cfg_0002", + "recording_id": "task_0065_user_0007_scene_0018_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_0065_user_0007_scene_0018_cfg_0002_1143_036422060215.jpg", + "images/task_0065_user_0007_scene_0018_cfg_0002_1106_036422060215.jpg", + "images/task_0065_user_0007_scene_0018_cfg_0002_1124_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0065", + "split": "eval", + "split_group_id": "task_0065_user_0007_scene_0018_cfg_0002", + "recording_id": "task_0065_user_0007_scene_0018_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_0065_user_0007_scene_0018_cfg_0002_1666_036422060215.jpg", + "images/task_0065_user_0007_scene_0018_cfg_0002_1682_036422060215.jpg", + "images/task_0065_user_0007_scene_0018_cfg_0002_1699_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0065", + "split": "eval", + "split_group_id": "task_0065_user_0007_scene_0018_cfg_0002", + "recording_id": "task_0065_user_0007_scene_0018_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_0065_user_0007_scene_0018_cfg_0002_2102_036422060215.jpg", + "images/task_0065_user_0007_scene_0018_cfg_0002_2058_036422060215.jpg", + "images/task_0065_user_0007_scene_0018_cfg_0002_2145_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0065", + "split": "eval", + "split_group_id": "task_0065_user_0007_scene_0018_cfg_0002", + "recording_id": "task_0065_user_0007_scene_0018_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_0065_user_0007_scene_0018_cfg_0002_2317_036422060215.jpg", + "images/task_0065_user_0007_scene_0018_cfg_0002_2304_036422060215.jpg", + "images/task_0065_user_0007_scene_0018_cfg_0002_2291_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0065", + "split": "eval", + "split_group_id": "task_0065_user_0007_scene_0018_cfg_0002", + "recording_id": "task_0065_user_0007_scene_0018_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_0065_user_0007_scene_0018_cfg_0002_810_036422060215.jpg", + "images/task_0065_user_0007_scene_0018_cfg_0002_859_036422060215.jpg", + "images/task_0065_user_0007_scene_0018_cfg_0002_761_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0065", + "split": "eval", + "split_group_id": "task_0065_user_0007_scene_0018_cfg_0002", + "recording_id": "task_0065_user_0007_scene_0018_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_0065_user_0007_scene_0018_cfg_0002_1854_036422060215.jpg", + "images/task_0065_user_0007_scene_0018_cfg_0002_1778_036422060215.jpg", + "images/task_0065_user_0007_scene_0018_cfg_0002_1816_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0065", + "split": "eval", + "split_group_id": "task_0065_user_0007_scene_0018_cfg_0002", + "recording_id": "task_0065_user_0007_scene_0018_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_0065_user_0007_scene_0018_cfg_0002_1455_036422060215.jpg", + "images/task_0065_user_0007_scene_0018_cfg_0002_1406_036422060215.jpg", + "images/task_0065_user_0007_scene_0018_cfg_0002_1504_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0065", + "split": "eval", + "split_group_id": "task_0065_user_0007_scene_0018_cfg_0002", + "recording_id": "task_0065_user_0007_scene_0018_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0066_user_0007_scene_0004_cfg_0002_23_036422060215.jpg", + "images/task_0066_user_0007_scene_0004_cfg_0002_53_036422060215.jpg", + "images/task_0066_user_0007_scene_0004_cfg_0002_83_036422060215.jpg", + "images/task_0066_user_0007_scene_0004_cfg_0002_113_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0007_scene_0004_cfg_0002", + "recording_id": "task_0066_user_0007_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0066_user_0007_scene_0004_cfg_0002_57_036422060215.jpg", + "images/task_0066_user_0007_scene_0004_cfg_0002_87_036422060215.jpg", + "images/task_0066_user_0007_scene_0004_cfg_0002_117_036422060215.jpg", + "images/task_0066_user_0007_scene_0004_cfg_0002_147_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0007_scene_0004_cfg_0002", + "recording_id": "task_0066_user_0007_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0066_user_0007_scene_0004_cfg_0002_91_036422060215.jpg", + "images/task_0066_user_0007_scene_0004_cfg_0002_121_036422060215.jpg", + "images/task_0066_user_0007_scene_0004_cfg_0002_151_036422060215.jpg", + "images/task_0066_user_0007_scene_0004_cfg_0002_181_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0007_scene_0004_cfg_0002", + "recording_id": "task_0066_user_0007_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0066_user_0007_scene_0004_cfg_0002_125_036422060215.jpg", + "images/task_0066_user_0007_scene_0004_cfg_0002_155_036422060215.jpg", + "images/task_0066_user_0007_scene_0004_cfg_0002_185_036422060215.jpg", + "images/task_0066_user_0007_scene_0004_cfg_0002_215_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0007_scene_0004_cfg_0002", + "recording_id": "task_0066_user_0007_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0066_user_0007_scene_0004_cfg_0002_159_036422060215.jpg", + "images/task_0066_user_0007_scene_0004_cfg_0002_189_036422060215.jpg", + "images/task_0066_user_0007_scene_0004_cfg_0002_219_036422060215.jpg", + "images/task_0066_user_0007_scene_0004_cfg_0002_249_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0007_scene_0004_cfg_0002", + "recording_id": "task_0066_user_0007_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0066_user_0007_scene_0004_cfg_0002_227_036422060215.jpg", + "images/task_0066_user_0007_scene_0004_cfg_0002_257_036422060215.jpg", + "images/task_0066_user_0007_scene_0004_cfg_0002_287_036422060215.jpg", + "images/task_0066_user_0007_scene_0004_cfg_0002_317_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0007_scene_0004_cfg_0002", + "recording_id": "task_0066_user_0007_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0066_user_0007_scene_0004_cfg_0002_261_036422060215.jpg", + "images/task_0066_user_0007_scene_0004_cfg_0002_291_036422060215.jpg", + "images/task_0066_user_0007_scene_0004_cfg_0002_321_036422060215.jpg", + "images/task_0066_user_0007_scene_0004_cfg_0002_351_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0007_scene_0004_cfg_0002", + "recording_id": "task_0066_user_0007_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0066_user_0007_scene_0004_cfg_0002_295_036422060215.jpg", + "images/task_0066_user_0007_scene_0004_cfg_0002_325_036422060215.jpg", + "images/task_0066_user_0007_scene_0004_cfg_0002_355_036422060215.jpg", + "images/task_0066_user_0007_scene_0004_cfg_0002_385_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0007_scene_0004_cfg_0002", + "recording_id": "task_0066_user_0007_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0066_user_0007_scene_0004_cfg_0002_329_036422060215.jpg", + "images/task_0066_user_0007_scene_0004_cfg_0002_359_036422060215.jpg", + "images/task_0066_user_0007_scene_0004_cfg_0002_389_036422060215.jpg", + "images/task_0066_user_0007_scene_0004_cfg_0002_419_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0007_scene_0004_cfg_0002", + "recording_id": "task_0066_user_0007_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0066_user_0007_scene_0004_cfg_0002_363_036422060215.jpg", + "images/task_0066_user_0007_scene_0004_cfg_0002_393_036422060215.jpg", + "images/task_0066_user_0007_scene_0004_cfg_0002_423_036422060215.jpg", + "images/task_0066_user_0007_scene_0004_cfg_0002_453_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0007_scene_0004_cfg_0002", + "recording_id": "task_0066_user_0007_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0066_user_0007_scene_0005_cfg_0002_6_036422060215.jpg", + "images/task_0066_user_0007_scene_0005_cfg_0002_14_036422060215.jpg", + "images/task_0066_user_0007_scene_0005_cfg_0002_22_036422060215.jpg", + "images/task_0066_user_0007_scene_0005_cfg_0002_30_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0066_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0066_user_0007_scene_0005_cfg_0002_14_036422060215.jpg", + "images/task_0066_user_0007_scene_0005_cfg_0002_22_036422060215.jpg", + "images/task_0066_user_0007_scene_0005_cfg_0002_30_036422060215.jpg", + "images/task_0066_user_0007_scene_0005_cfg_0002_38_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0066_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0066_user_0007_scene_0005_cfg_0002_22_036422060215.jpg", + "images/task_0066_user_0007_scene_0005_cfg_0002_30_036422060215.jpg", + "images/task_0066_user_0007_scene_0005_cfg_0002_38_036422060215.jpg", + "images/task_0066_user_0007_scene_0005_cfg_0002_46_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0066_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0066_user_0007_scene_0005_cfg_0002_30_036422060215.jpg", + "images/task_0066_user_0007_scene_0005_cfg_0002_38_036422060215.jpg", + "images/task_0066_user_0007_scene_0005_cfg_0002_46_036422060215.jpg", + "images/task_0066_user_0007_scene_0005_cfg_0002_54_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0066_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0066_user_0007_scene_0005_cfg_0002_38_036422060215.jpg", + "images/task_0066_user_0007_scene_0005_cfg_0002_46_036422060215.jpg", + "images/task_0066_user_0007_scene_0005_cfg_0002_54_036422060215.jpg", + "images/task_0066_user_0007_scene_0005_cfg_0002_62_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0066_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0066_user_0007_scene_0005_cfg_0002_54_036422060215.jpg", + "images/task_0066_user_0007_scene_0005_cfg_0002_62_036422060215.jpg", + "images/task_0066_user_0007_scene_0005_cfg_0002_70_036422060215.jpg", + "images/task_0066_user_0007_scene_0005_cfg_0002_78_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0066_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0066_user_0007_scene_0005_cfg_0002_62_036422060215.jpg", + "images/task_0066_user_0007_scene_0005_cfg_0002_70_036422060215.jpg", + "images/task_0066_user_0007_scene_0005_cfg_0002_78_036422060215.jpg", + "images/task_0066_user_0007_scene_0005_cfg_0002_86_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0066_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0066_user_0007_scene_0005_cfg_0002_70_036422060215.jpg", + "images/task_0066_user_0007_scene_0005_cfg_0002_78_036422060215.jpg", + "images/task_0066_user_0007_scene_0005_cfg_0002_86_036422060215.jpg", + "images/task_0066_user_0007_scene_0005_cfg_0002_94_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0066_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0066_user_0007_scene_0005_cfg_0002_78_036422060215.jpg", + "images/task_0066_user_0007_scene_0005_cfg_0002_86_036422060215.jpg", + "images/task_0066_user_0007_scene_0005_cfg_0002_94_036422060215.jpg", + "images/task_0066_user_0007_scene_0005_cfg_0002_102_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0066_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0066_user_0007_scene_0005_cfg_0002_86_036422060215.jpg", + "images/task_0066_user_0007_scene_0005_cfg_0002_94_036422060215.jpg", + "images/task_0066_user_0007_scene_0005_cfg_0002_102_036422060215.jpg", + "images/task_0066_user_0007_scene_0005_cfg_0002_110_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0066_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0005_cfg_0002_106_036422060215.jpg", + "images/task_0066_user_0014_scene_0005_cfg_0002_109_036422060215.jpg", + "images/task_0066_user_0014_scene_0005_cfg_0002_112_036422060215.jpg", + "images/task_0066_user_0014_scene_0005_cfg_0002_115_036422060215.jpg", + "images/task_0066_user_0014_scene_0005_cfg_0002_118_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0005_cfg_0002_142_036422060215.jpg", + "images/task_0066_user_0014_scene_0005_cfg_0002_145_036422060215.jpg", + "images/task_0066_user_0014_scene_0005_cfg_0002_148_036422060215.jpg", + "images/task_0066_user_0014_scene_0005_cfg_0002_151_036422060215.jpg", + "images/task_0066_user_0014_scene_0005_cfg_0002_154_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0005_cfg_0002_166_036422060215.jpg", + "images/task_0066_user_0014_scene_0005_cfg_0002_169_036422060215.jpg", + "images/task_0066_user_0014_scene_0005_cfg_0002_172_036422060215.jpg", + "images/task_0066_user_0014_scene_0005_cfg_0002_175_036422060215.jpg", + "images/task_0066_user_0014_scene_0005_cfg_0002_178_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0005_cfg_0002_33_036422060215.jpg", + "images/task_0066_user_0014_scene_0005_cfg_0002_63_036422060215.jpg", + "images/task_0066_user_0014_scene_0005_cfg_0002_93_036422060215.jpg", + "images/task_0066_user_0014_scene_0005_cfg_0002_123_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. 
No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0005_cfg_0002_133_036422060215.jpg", + "images/task_0066_user_0014_scene_0005_cfg_0002_163_036422060215.jpg", + "images/task_0066_user_0014_scene_0005_cfg_0002_193_036422060215.jpg", + "images/task_0066_user_0014_scene_0005_cfg_0002_223_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0005_cfg_0002_233_036422060215.jpg", + "images/task_0066_user_0014_scene_0005_cfg_0002_263_036422060215.jpg", + "images/task_0066_user_0014_scene_0005_cfg_0002_293_036422060215.jpg", + "images/task_0066_user_0014_scene_0005_cfg_0002_323_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. 
Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0005_cfg_0002_383_036422060215.jpg", + "images/task_0066_user_0014_scene_0005_cfg_0002_413_036422060215.jpg", + "images/task_0066_user_0014_scene_0005_cfg_0002_443_036422060215.jpg", + "images/task_0066_user_0014_scene_0005_cfg_0002_473_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0005_cfg_0002_483_036422060215.jpg", + "images/task_0066_user_0014_scene_0005_cfg_0002_513_036422060215.jpg", + "images/task_0066_user_0014_scene_0005_cfg_0002_543_036422060215.jpg", + "images/task_0066_user_0014_scene_0005_cfg_0002_573_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. 
Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0005_cfg_0002_583_036422060215.jpg", + "images/task_0066_user_0014_scene_0005_cfg_0002_613_036422060215.jpg", + "images/task_0066_user_0014_scene_0005_cfg_0002_643_036422060215.jpg", + "images/task_0066_user_0014_scene_0005_cfg_0002_673_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0005_cfg_0002_1471_036422060215.jpg", + "images/task_0066_user_0014_scene_0005_cfg_0002_1484_036422060215.jpg", + "images/task_0066_user_0014_scene_0005_cfg_0002_1496_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0005_cfg_0002_1399_036422060215.jpg", + "images/task_0066_user_0014_scene_0005_cfg_0002_1411_036422060215.jpg", + "images/task_0066_user_0014_scene_0005_cfg_0002_1423_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0005_cfg_0002_579_036422060215.jpg", + "images/task_0066_user_0014_scene_0005_cfg_0002_613_036422060215.jpg", + "images/task_0066_user_0014_scene_0005_cfg_0002_596_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0005_cfg_0002_1572_036422060215.jpg", + "images/task_0066_user_0014_scene_0005_cfg_0002_1548_036422060215.jpg", + "images/task_0066_user_0014_scene_0005_cfg_0002_1560_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0005_cfg_0002_1334_036422060215.jpg", + "images/task_0066_user_0014_scene_0005_cfg_0002_1319_036422060215.jpg", + "images/task_0066_user_0014_scene_0005_cfg_0002_1349_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0005_cfg_0002_332_036422060215.jpg", + "images/task_0066_user_0014_scene_0005_cfg_0002_347_036422060215.jpg", + "images/task_0066_user_0014_scene_0005_cfg_0002_317_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0005_cfg_0002_678_036422060215.jpg", + "images/task_0066_user_0014_scene_0005_cfg_0002_666_036422060215.jpg", + "images/task_0066_user_0014_scene_0005_cfg_0002_690_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0005_cfg_0002_807_036422060215.jpg", + "images/task_0066_user_0014_scene_0005_cfg_0002_831_036422060215.jpg", + "images/task_0066_user_0014_scene_0005_cfg_0002_819_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0005_cfg_0002_1168_036422060215.jpg", + "images/task_0066_user_0014_scene_0005_cfg_0002_1234_036422060215.jpg", + "images/task_0066_user_0014_scene_0005_cfg_0002_1201_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0005_cfg_0002_922_036422060215.jpg", + "images/task_0066_user_0014_scene_0005_cfg_0002_948_036422060215.jpg", + "images/task_0066_user_0014_scene_0005_cfg_0002_973_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0005_cfg_0002_191_036422060215.jpg", + "images/task_0066_user_0014_scene_0005_cfg_0002_210_036422060215.jpg", + "images/task_0066_user_0014_scene_0005_cfg_0002_228_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0005_cfg_0002_474_036422060215.jpg", + "images/task_0066_user_0014_scene_0005_cfg_0002_450_036422060215.jpg", + "images/task_0066_user_0014_scene_0005_cfg_0002_462_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0006_cfg_0002_6_036422060215.jpg", + "images/task_0066_user_0014_scene_0006_cfg_0002_9_036422060215.jpg", + "images/task_0066_user_0014_scene_0006_cfg_0002_12_036422060215.jpg", + "images/task_0066_user_0014_scene_0006_cfg_0002_15_036422060215.jpg", + "images/task_0066_user_0014_scene_0006_cfg_0002_18_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0006_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. 
Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0006_cfg_0002_1003_036422060215.jpg", + "images/task_0066_user_0014_scene_0006_cfg_0002_1006_036422060215.jpg", + "images/task_0066_user_0014_scene_0006_cfg_0002_1009_036422060215.jpg", + "images/task_0066_user_0014_scene_0006_cfg_0002_1012_036422060215.jpg", + "images/task_0066_user_0014_scene_0006_cfg_0002_1015_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0006_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0006_cfg_0002_2057_036422060215.jpg", + "images/task_0066_user_0014_scene_0006_cfg_0002_2060_036422060215.jpg", + "images/task_0066_user_0014_scene_0006_cfg_0002_2063_036422060215.jpg", + "images/task_0066_user_0014_scene_0006_cfg_0002_2066_036422060215.jpg", + "images/task_0066_user_0014_scene_0006_cfg_0002_2069_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0006_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0006_cfg_0002_42_036422060215.jpg", + "images/task_0066_user_0014_scene_0006_cfg_0002_72_036422060215.jpg", + "images/task_0066_user_0014_scene_0006_cfg_0002_102_036422060215.jpg", + "images/task_0066_user_0014_scene_0006_cfg_0002_132_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0006_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. 
No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0006_cfg_0002_170_036422060215.jpg", + "images/task_0066_user_0014_scene_0006_cfg_0002_200_036422060215.jpg", + "images/task_0066_user_0014_scene_0006_cfg_0002_230_036422060215.jpg", + "images/task_0066_user_0014_scene_0006_cfg_0002_260_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0006_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0006_cfg_0002_298_036422060215.jpg", + "images/task_0066_user_0014_scene_0006_cfg_0002_328_036422060215.jpg", + "images/task_0066_user_0014_scene_0006_cfg_0002_358_036422060215.jpg", + "images/task_0066_user_0014_scene_0006_cfg_0002_388_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0006_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. 
Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0006_cfg_0002_490_036422060215.jpg", + "images/task_0066_user_0014_scene_0006_cfg_0002_520_036422060215.jpg", + "images/task_0066_user_0014_scene_0006_cfg_0002_550_036422060215.jpg", + "images/task_0066_user_0014_scene_0006_cfg_0002_580_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0006_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0006_cfg_0002_618_036422060215.jpg", + "images/task_0066_user_0014_scene_0006_cfg_0002_648_036422060215.jpg", + "images/task_0066_user_0014_scene_0006_cfg_0002_678_036422060215.jpg", + "images/task_0066_user_0014_scene_0006_cfg_0002_708_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0006_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. 
Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0006_cfg_0002_746_036422060215.jpg", + "images/task_0066_user_0014_scene_0006_cfg_0002_776_036422060215.jpg", + "images/task_0066_user_0014_scene_0006_cfg_0002_806_036422060215.jpg", + "images/task_0066_user_0014_scene_0006_cfg_0002_836_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0006_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0006_cfg_0002_1618_036422060215.jpg", + "images/task_0066_user_0014_scene_0006_cfg_0002_1634_036422060215.jpg", + "images/task_0066_user_0014_scene_0006_cfg_0002_1626_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0006_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0006_cfg_0002_1226_036422060215.jpg", + "images/task_0066_user_0014_scene_0006_cfg_0002_1244_036422060215.jpg", + "images/task_0066_user_0014_scene_0006_cfg_0002_1208_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0006_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0006_cfg_0002_1710_036422060215.jpg", + "images/task_0066_user_0014_scene_0006_cfg_0002_1700_036422060215.jpg", + "images/task_0066_user_0014_scene_0006_cfg_0002_1719_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0006_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0006_cfg_0002_1112_036422060215.jpg", + "images/task_0066_user_0014_scene_0006_cfg_0002_1136_036422060215.jpg", + "images/task_0066_user_0014_scene_0006_cfg_0002_1089_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0006_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0006_cfg_0002_2063_036422060215.jpg", + "images/task_0066_user_0014_scene_0006_cfg_0002_2045_036422060215.jpg", + "images/task_0066_user_0014_scene_0006_cfg_0002_2027_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0006_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0006_cfg_0002_450_036422060215.jpg", + "images/task_0066_user_0014_scene_0006_cfg_0002_435_036422060215.jpg", + "images/task_0066_user_0014_scene_0006_cfg_0002_420_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0006_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0006_cfg_0002_1318_036422060215.jpg", + "images/task_0066_user_0014_scene_0006_cfg_0002_1302_036422060215.jpg", + "images/task_0066_user_0014_scene_0006_cfg_0002_1333_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0006_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0006_cfg_0002_833_036422060215.jpg", + "images/task_0066_user_0014_scene_0006_cfg_0002_807_036422060215.jpg", + "images/task_0066_user_0014_scene_0006_cfg_0002_859_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0006_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0006_cfg_0002_1787_036422060215.jpg", + "images/task_0066_user_0014_scene_0006_cfg_0002_1778_036422060215.jpg", + "images/task_0066_user_0014_scene_0006_cfg_0002_1796_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0006_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0006_cfg_0002_1966_036422060215.jpg", + "images/task_0066_user_0014_scene_0006_cfg_0002_1944_036422060215.jpg", + "images/task_0066_user_0014_scene_0006_cfg_0002_1923_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0006_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0006_cfg_0002_145_036422060215.jpg", + "images/task_0066_user_0014_scene_0006_cfg_0002_133_036422060215.jpg", + "images/task_0066_user_0014_scene_0006_cfg_0002_157_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0006_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0006_cfg_0002_1498_036422060215.jpg", + "images/task_0066_user_0014_scene_0006_cfg_0002_1522_036422060215.jpg", + "images/task_0066_user_0014_scene_0006_cfg_0002_1545_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0006_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0006_cfg_0002_72_036422060215.jpg", + "images/task_0066_user_0014_scene_0006_cfg_0002_85_036422060215.jpg", + "images/task_0066_user_0014_scene_0006_cfg_0002_58_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0006_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0006_cfg_0002_323_036422060215.jpg", + "images/task_0066_user_0014_scene_0006_cfg_0002_311_036422060215.jpg", + "images/task_0066_user_0014_scene_0006_cfg_0002_335_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0006_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0006_cfg_0002_978_036422060215.jpg", + "images/task_0066_user_0014_scene_0006_cfg_0002_1007_036422060215.jpg", + "images/task_0066_user_0014_scene_0006_cfg_0002_948_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0006_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0006_cfg_0002_1396_036422060215.jpg", + "images/task_0066_user_0014_scene_0006_cfg_0002_1412_036422060215.jpg", + "images/task_0066_user_0014_scene_0006_cfg_0002_1404_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0006_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0006_cfg_0002_551_036422060215.jpg", + "images/task_0066_user_0014_scene_0006_cfg_0002_606_036422060215.jpg", + "images/task_0066_user_0014_scene_0006_cfg_0002_660_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0006_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. approach\nC. release\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
contact" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_29_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. hold and carry\nC. approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. hold and carry" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_37_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. hold and carry\nC. transfer\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
approach" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_39_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. contact\nC. hold and carry\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. transfer" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_43_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. approach\nC. release\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
release" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_75_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. contact\nC. approach\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. approach" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_1249_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. contact\nC. approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
contact" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_1269_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. approach\nC. hold and carry\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. hold and carry" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_1277_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. release\nC. transfer\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
release" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_1291_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. transfer\nC. approach\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. transfer" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_1365_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. approach\nC. contact\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
contact" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_2537_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. release\nC. approach\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. approach" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_2548_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. release\nC. transfer\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
transfer" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_2681_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. transfer\nC. hold and carry\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. hold and carry" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_2716_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. transfer\nC. approach\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
release" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_2729_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_29_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_39_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. 
Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_1249_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_1269_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — contact established" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_2537_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_2548_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — contact established" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_2716_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Right\nC. Down\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Down" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_20_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_30_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_40_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_50_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Right\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Right" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_70_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_80_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_90_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_100_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Down\nC. Right\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Right" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_80_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_90_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_100_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_110_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Up\nC. Right\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Left" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_130_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_140_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_150_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_160_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Down\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Left" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_140_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_150_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_160_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_170_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Left\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Left" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_150_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_160_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_170_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_180_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Right\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Right" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_180_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_190_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_200_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_210_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Right\nC. Left\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Down" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_210_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_220_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_230_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_240_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Left\nC. Up\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Up" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_260_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_270_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_280_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_290_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Left\nC. Down\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Up" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_270_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_280_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_290_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_300_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Left\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Up" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_280_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_290_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_300_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_310_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Left\nC. Up\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Down" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_480_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_490_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_500_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_510_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_1359_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1362_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1365_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1368_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1371_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. 
Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_2675_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2678_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2681_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2684_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2687_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_55_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_85_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_115_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_145_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_219_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_249_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_279_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_309_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_465_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_495_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_525_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_555_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_629_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_659_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_689_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_719_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_875_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_905_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_935_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_965_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_1039_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1069_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1099_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1129_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. 
Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_1373_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1427_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_2270_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2383_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. 
Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_1427_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1437_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_702_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_603_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. 
Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_1016_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1154_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_1953_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1828_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. 
Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_475_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_387_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_1285_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1356_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. 
Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_492_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_475_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_44_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_70_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. 
Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_1285_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1251_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_360_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_185_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. 
Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_2681_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2575_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_2217_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2270_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. 
Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_2419_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2383_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_603_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_575_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. 
Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_2419_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2575_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_895_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_761_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. 
Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_2148_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2217_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_575_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_492_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. 
Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_1828_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1803_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_2047_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2130_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. 
Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_761_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_702_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_2130_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2148_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. 
Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_70_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_151_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_1591_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1803_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. 
Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_1373_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1356_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_1591_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1550_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. 
Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_1188_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1251_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_1437_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1550_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. 
Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_1953_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2047_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_1188_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1154_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. 
Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_387_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_360_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_151_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_185_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. 
Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_1016_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_895_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_40_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_43_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_46_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_49_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_52_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_51_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_54_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_57_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_60_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_63_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_62_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_65_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_68_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_71_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_74_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_149_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_152_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_155_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_158_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_161_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_162_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_165_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_168_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_171_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_174_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_175_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_178_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_181_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_184_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_187_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_357_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_360_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_363_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_366_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_369_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_368_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_371_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_374_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_377_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_380_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_378_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_381_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_384_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_387_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_390_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_471_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_474_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_477_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_480_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_483_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_478_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_481_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_484_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_487_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_490_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_484_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_487_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_490_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_493_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_496_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_572_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_575_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_578_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_581_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_584_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_583_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_586_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_589_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_592_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_595_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_594_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_597_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_600_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_603_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_606_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_702_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_705_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_708_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_711_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_714_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_726_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_729_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_732_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_735_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_738_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_749_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_752_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_755_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_758_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_761_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_901_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_904_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_907_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_910_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_913_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_950_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_953_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_956_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_959_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_962_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_998_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1001_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1004_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1007_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1010_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_1151_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1154_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1157_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1160_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1163_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_1165_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1168_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1171_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1174_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1177_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_1179_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1182_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1185_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1188_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1191_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_1248_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1251_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1254_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1257_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1260_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_1262_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1265_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1268_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1271_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1274_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_1276_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1279_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1282_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1285_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1288_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_1352_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1355_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1358_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1361_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1364_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_1358_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1361_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1364_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1367_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1370_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_1365_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1368_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1371_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1374_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1377_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_1422_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1425_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1428_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1431_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1434_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_1426_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1429_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1432_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1435_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1438_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_1430_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1433_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1436_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1439_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1442_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_1548_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1551_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1554_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1557_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1560_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_1564_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1567_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1570_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1573_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1576_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_1581_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1584_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1587_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1590_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1593_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_1799_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1802_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1805_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1808_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1811_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_1810_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1813_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1816_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1819_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1822_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_1820_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1823_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1826_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1829_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1832_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_1957_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1960_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1963_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1966_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1969_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_1994_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1997_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2000_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2003_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2006_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_2031_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2034_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2037_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2040_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2043_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_2126_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2129_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2132_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2135_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2138_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_2133_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2136_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2139_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2142_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2145_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_2140_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2143_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2146_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2149_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2152_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_2216_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2219_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2222_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2225_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2228_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_2238_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2241_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2244_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2247_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2250_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_2259_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2262_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2265_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2268_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2271_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_2380_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2383_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2386_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2389_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2392_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_2395_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2398_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2401_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2404_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2407_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_2410_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2413_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2416_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2419_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2422_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_2580_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2583_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2586_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2589_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2592_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_2622_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2625_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2628_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2631_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2634_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_2664_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2667_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2670_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2673_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2676_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_604_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_589_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_574_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_1548_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1570_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1593_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_492_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_484_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_475_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_359_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_374_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_388_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_1950_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2000_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2050_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_1020_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_891_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_956_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_1152_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1171_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1190_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_2129_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2139_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2149_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_2244_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2272_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2215_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_71_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_43_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_57_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_168_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_186_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_150_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_732_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_700_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_763_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_1356_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1364_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1373_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_1249_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1287_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1268_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_1816_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1829_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_1802_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_2382_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2420_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2401_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_0066_user_0014_scene_0010_cfg_0002_2685_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2571_036422060215.jpg", + "images/task_0066_user_0014_scene_0010_cfg_0002_2628_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0066", + "split": "eval", + "split_group_id": "task_0066_user_0014_scene_0010_cfg_0002", + "recording_id": "task_0066_user_0014_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0067_user_0014_scene_0001_cfg_0002_6_036422060215.jpg", + "images/task_0067_user_0014_scene_0001_cfg_0002_9_036422060215.jpg", + "images/task_0067_user_0014_scene_0001_cfg_0002_12_036422060215.jpg", + "images/task_0067_user_0014_scene_0001_cfg_0002_15_036422060215.jpg", + "images/task_0067_user_0014_scene_0001_cfg_0002_18_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0067", + "split": "eval", + "split_group_id": "task_0067_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0067_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0067_user_0014_scene_0001_cfg_0002_24_036422060215.jpg", + "images/task_0067_user_0014_scene_0001_cfg_0002_27_036422060215.jpg", + "images/task_0067_user_0014_scene_0001_cfg_0002_30_036422060215.jpg", + "images/task_0067_user_0014_scene_0001_cfg_0002_33_036422060215.jpg", + "images/task_0067_user_0014_scene_0001_cfg_0002_36_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0067", + "split": "eval", + "split_group_id": "task_0067_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0067_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0067_user_0014_scene_0001_cfg_0002_86_036422060215.jpg", + "images/task_0067_user_0014_scene_0001_cfg_0002_89_036422060215.jpg", + "images/task_0067_user_0014_scene_0001_cfg_0002_92_036422060215.jpg", + "images/task_0067_user_0014_scene_0001_cfg_0002_95_036422060215.jpg", + "images/task_0067_user_0014_scene_0001_cfg_0002_98_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0067", + "split": "eval", + "split_group_id": "task_0067_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0067_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0067_user_0014_scene_0001_cfg_0002_191_036422060215.jpg", + "images/task_0067_user_0014_scene_0001_cfg_0002_194_036422060215.jpg", + "images/task_0067_user_0014_scene_0001_cfg_0002_197_036422060215.jpg", + "images/task_0067_user_0014_scene_0001_cfg_0002_200_036422060215.jpg", + "images/task_0067_user_0014_scene_0001_cfg_0002_203_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0067", + "split": "eval", + "split_group_id": "task_0067_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0067_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0067_user_0014_scene_0001_cfg_0002_209_036422060215.jpg", + "images/task_0067_user_0014_scene_0001_cfg_0002_212_036422060215.jpg", + "images/task_0067_user_0014_scene_0001_cfg_0002_215_036422060215.jpg", + "images/task_0067_user_0014_scene_0001_cfg_0002_218_036422060215.jpg", + "images/task_0067_user_0014_scene_0001_cfg_0002_221_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0067", + "split": "eval", + "split_group_id": "task_0067_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0067_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0067_user_0014_scene_0001_cfg_0002_5_036422060215.jpg", + "images/task_0067_user_0014_scene_0001_cfg_0002_12_036422060215.jpg", + "images/task_0067_user_0014_scene_0001_cfg_0002_19_036422060215.jpg", + "images/task_0067_user_0014_scene_0001_cfg_0002_26_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0067", + "split": "eval", + "split_group_id": "task_0067_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0067_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. 
Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0067_user_0014_scene_0001_cfg_0002_19_036422060215.jpg", + "images/task_0067_user_0014_scene_0001_cfg_0002_26_036422060215.jpg", + "images/task_0067_user_0014_scene_0001_cfg_0002_33_036422060215.jpg", + "images/task_0067_user_0014_scene_0001_cfg_0002_40_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0067", + "split": "eval", + "split_group_id": "task_0067_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0067_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0067_user_0014_scene_0001_cfg_0002_33_036422060215.jpg", + "images/task_0067_user_0014_scene_0001_cfg_0002_40_036422060215.jpg", + "images/task_0067_user_0014_scene_0001_cfg_0002_47_036422060215.jpg", + "images/task_0067_user_0014_scene_0001_cfg_0002_54_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0067", + "split": "eval", + "split_group_id": "task_0067_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0067_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. 
No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0067_user_0014_scene_0001_cfg_0002_54_036422060215.jpg", + "images/task_0067_user_0014_scene_0001_cfg_0002_61_036422060215.jpg", + "images/task_0067_user_0014_scene_0001_cfg_0002_68_036422060215.jpg", + "images/task_0067_user_0014_scene_0001_cfg_0002_75_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0067", + "split": "eval", + "split_group_id": "task_0067_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0067_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0067_user_0014_scene_0001_cfg_0002_68_036422060215.jpg", + "images/task_0067_user_0014_scene_0001_cfg_0002_75_036422060215.jpg", + "images/task_0067_user_0014_scene_0001_cfg_0002_82_036422060215.jpg", + "images/task_0067_user_0014_scene_0001_cfg_0002_89_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0067", + "split": "eval", + "split_group_id": "task_0067_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0067_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. 
Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0067_user_0014_scene_0001_cfg_0002_82_036422060215.jpg", + "images/task_0067_user_0014_scene_0001_cfg_0002_89_036422060215.jpg", + "images/task_0067_user_0014_scene_0001_cfg_0002_96_036422060215.jpg", + "images/task_0067_user_0014_scene_0001_cfg_0002_103_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0067", + "split": "eval", + "split_group_id": "task_0067_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0067_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_0067_user_0014_scene_0001_cfg_0002_159_036422060215.jpg", + "images/task_0067_user_0014_scene_0001_cfg_0002_203_036422060215.jpg", + "images/task_0067_user_0014_scene_0001_cfg_0002_181_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0067", + "split": "eval", + "split_group_id": "task_0067_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0067_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_0067_user_0014_scene_0001_cfg_0002_84_036422060215.jpg", + "images/task_0067_user_0014_scene_0001_cfg_0002_71_036422060215.jpg", + "images/task_0067_user_0014_scene_0001_cfg_0002_96_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0067", + "split": "eval", + "split_group_id": "task_0067_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0067_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0067_user_0014_scene_0002_cfg_0002_6_036422060215.jpg", + "images/task_0067_user_0014_scene_0002_cfg_0002_9_036422060215.jpg", + "images/task_0067_user_0014_scene_0002_cfg_0002_12_036422060215.jpg", + "images/task_0067_user_0014_scene_0002_cfg_0002_15_036422060215.jpg", + "images/task_0067_user_0014_scene_0002_cfg_0002_18_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0067", + "split": "eval", + "split_group_id": "task_0067_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0067_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. 
Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0067_user_0014_scene_0002_cfg_0002_24_036422060215.jpg", + "images/task_0067_user_0014_scene_0002_cfg_0002_27_036422060215.jpg", + "images/task_0067_user_0014_scene_0002_cfg_0002_30_036422060215.jpg", + "images/task_0067_user_0014_scene_0002_cfg_0002_33_036422060215.jpg", + "images/task_0067_user_0014_scene_0002_cfg_0002_36_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0067", + "split": "eval", + "split_group_id": "task_0067_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0067_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0067_user_0014_scene_0002_cfg_0002_53_036422060215.jpg", + "images/task_0067_user_0014_scene_0002_cfg_0002_56_036422060215.jpg", + "images/task_0067_user_0014_scene_0002_cfg_0002_59_036422060215.jpg", + "images/task_0067_user_0014_scene_0002_cfg_0002_62_036422060215.jpg", + "images/task_0067_user_0014_scene_0002_cfg_0002_65_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0067", + "split": "eval", + "split_group_id": "task_0067_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0067_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0067_user_0014_scene_0002_cfg_0002_132_036422060215.jpg", + "images/task_0067_user_0014_scene_0002_cfg_0002_135_036422060215.jpg", + "images/task_0067_user_0014_scene_0002_cfg_0002_138_036422060215.jpg", + "images/task_0067_user_0014_scene_0002_cfg_0002_141_036422060215.jpg", + "images/task_0067_user_0014_scene_0002_cfg_0002_144_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0067", + "split": "eval", + "split_group_id": "task_0067_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0067_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0067_user_0014_scene_0002_cfg_0002_150_036422060215.jpg", + "images/task_0067_user_0014_scene_0002_cfg_0002_153_036422060215.jpg", + "images/task_0067_user_0014_scene_0002_cfg_0002_156_036422060215.jpg", + "images/task_0067_user_0014_scene_0002_cfg_0002_159_036422060215.jpg", + "images/task_0067_user_0014_scene_0002_cfg_0002_162_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0067", + "split": "eval", + "split_group_id": "task_0067_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0067_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0067_user_0014_scene_0002_cfg_0002_233_036422060215.jpg", + "images/task_0067_user_0014_scene_0002_cfg_0002_236_036422060215.jpg", + "images/task_0067_user_0014_scene_0002_cfg_0002_239_036422060215.jpg", + "images/task_0067_user_0014_scene_0002_cfg_0002_242_036422060215.jpg", + "images/task_0067_user_0014_scene_0002_cfg_0002_245_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0067", + "split": "eval", + "split_group_id": "task_0067_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0067_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0067_user_0014_scene_0002_cfg_0002_6_036422060215.jpg", + "images/task_0067_user_0014_scene_0002_cfg_0002_16_036422060215.jpg", + "images/task_0067_user_0014_scene_0002_cfg_0002_26_036422060215.jpg", + "images/task_0067_user_0014_scene_0002_cfg_0002_36_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0067", + "split": "eval", + "split_group_id": "task_0067_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0067_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. 
Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0067_user_0014_scene_0002_cfg_0002_26_036422060215.jpg", + "images/task_0067_user_0014_scene_0002_cfg_0002_36_036422060215.jpg", + "images/task_0067_user_0014_scene_0002_cfg_0002_46_036422060215.jpg", + "images/task_0067_user_0014_scene_0002_cfg_0002_56_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0067", + "split": "eval", + "split_group_id": "task_0067_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0067_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0067_user_0014_scene_0002_cfg_0002_46_036422060215.jpg", + "images/task_0067_user_0014_scene_0002_cfg_0002_56_036422060215.jpg", + "images/task_0067_user_0014_scene_0002_cfg_0002_66_036422060215.jpg", + "images/task_0067_user_0014_scene_0002_cfg_0002_76_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0067", + "split": "eval", + "split_group_id": "task_0067_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0067_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. 
Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0067_user_0014_scene_0002_cfg_0002_56_036422060215.jpg", + "images/task_0067_user_0014_scene_0002_cfg_0002_66_036422060215.jpg", + "images/task_0067_user_0014_scene_0002_cfg_0002_76_036422060215.jpg", + "images/task_0067_user_0014_scene_0002_cfg_0002_86_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0067", + "split": "eval", + "split_group_id": "task_0067_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0067_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0067_user_0014_scene_0002_cfg_0002_76_036422060215.jpg", + "images/task_0067_user_0014_scene_0002_cfg_0002_86_036422060215.jpg", + "images/task_0067_user_0014_scene_0002_cfg_0002_96_036422060215.jpg", + "images/task_0067_user_0014_scene_0002_cfg_0002_106_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0067", + "split": "eval", + "split_group_id": "task_0067_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0067_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. 
Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0067_user_0014_scene_0002_cfg_0002_96_036422060215.jpg", + "images/task_0067_user_0014_scene_0002_cfg_0002_106_036422060215.jpg", + "images/task_0067_user_0014_scene_0002_cfg_0002_116_036422060215.jpg", + "images/task_0067_user_0014_scene_0002_cfg_0002_126_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0067", + "split": "eval", + "split_group_id": "task_0067_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0067_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_0067_user_0014_scene_0002_cfg_0002_208_036422060215.jpg", + "images/task_0067_user_0014_scene_0002_cfg_0002_245_036422060215.jpg", + "images/task_0067_user_0014_scene_0002_cfg_0002_172_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0067", + "split": "eval", + "split_group_id": "task_0067_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0067_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0068_user_0014_scene_0006_cfg_0002_6_036422060215.jpg", + "images/task_0068_user_0014_scene_0006_cfg_0002_9_036422060215.jpg", + "images/task_0068_user_0014_scene_0006_cfg_0002_12_036422060215.jpg", + "images/task_0068_user_0014_scene_0006_cfg_0002_15_036422060215.jpg", + "images/task_0068_user_0014_scene_0006_cfg_0002_18_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0068", + "split": "eval", + "split_group_id": "task_0068_user_0014_scene_0006_cfg_0002", + "recording_id": "task_0068_user_0014_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0068_user_0014_scene_0006_cfg_0002_24_036422060215.jpg", + "images/task_0068_user_0014_scene_0006_cfg_0002_27_036422060215.jpg", + "images/task_0068_user_0014_scene_0006_cfg_0002_30_036422060215.jpg", + "images/task_0068_user_0014_scene_0006_cfg_0002_33_036422060215.jpg", + "images/task_0068_user_0014_scene_0006_cfg_0002_36_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0068", + "split": "eval", + "split_group_id": "task_0068_user_0014_scene_0006_cfg_0002", + "recording_id": "task_0068_user_0014_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0068_user_0014_scene_0006_cfg_0002_53_036422060215.jpg", + "images/task_0068_user_0014_scene_0006_cfg_0002_56_036422060215.jpg", + "images/task_0068_user_0014_scene_0006_cfg_0002_59_036422060215.jpg", + "images/task_0068_user_0014_scene_0006_cfg_0002_62_036422060215.jpg", + "images/task_0068_user_0014_scene_0006_cfg_0002_65_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0068", + "split": "eval", + "split_group_id": "task_0068_user_0014_scene_0006_cfg_0002", + "recording_id": "task_0068_user_0014_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0068_user_0014_scene_0006_cfg_0002_3_036422060215.jpg", + "images/task_0068_user_0014_scene_0006_cfg_0002_8_036422060215.jpg", + "images/task_0068_user_0014_scene_0006_cfg_0002_13_036422060215.jpg", + "images/task_0068_user_0014_scene_0006_cfg_0002_18_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0068", + "split": "eval", + "split_group_id": "task_0068_user_0014_scene_0006_cfg_0002", + "recording_id": "task_0068_user_0014_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. 
No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0068_user_0014_scene_0006_cfg_0002_9_036422060215.jpg", + "images/task_0068_user_0014_scene_0006_cfg_0002_14_036422060215.jpg", + "images/task_0068_user_0014_scene_0006_cfg_0002_19_036422060215.jpg", + "images/task_0068_user_0014_scene_0006_cfg_0002_24_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0068", + "split": "eval", + "split_group_id": "task_0068_user_0014_scene_0006_cfg_0002", + "recording_id": "task_0068_user_0014_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0068_user_0014_scene_0006_cfg_0002_15_036422060215.jpg", + "images/task_0068_user_0014_scene_0006_cfg_0002_20_036422060215.jpg", + "images/task_0068_user_0014_scene_0006_cfg_0002_25_036422060215.jpg", + "images/task_0068_user_0014_scene_0006_cfg_0002_30_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0068", + "split": "eval", + "split_group_id": "task_0068_user_0014_scene_0006_cfg_0002", + "recording_id": "task_0068_user_0014_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. 
No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0068_user_0014_scene_0006_cfg_0002_27_036422060215.jpg", + "images/task_0068_user_0014_scene_0006_cfg_0002_32_036422060215.jpg", + "images/task_0068_user_0014_scene_0006_cfg_0002_37_036422060215.jpg", + "images/task_0068_user_0014_scene_0006_cfg_0002_42_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0068", + "split": "eval", + "split_group_id": "task_0068_user_0014_scene_0006_cfg_0002", + "recording_id": "task_0068_user_0014_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0068_user_0014_scene_0006_cfg_0002_33_036422060215.jpg", + "images/task_0068_user_0014_scene_0006_cfg_0002_38_036422060215.jpg", + "images/task_0068_user_0014_scene_0006_cfg_0002_43_036422060215.jpg", + "images/task_0068_user_0014_scene_0006_cfg_0002_48_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0068", + "split": "eval", + "split_group_id": "task_0068_user_0014_scene_0006_cfg_0002", + "recording_id": "task_0068_user_0014_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. 
No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0068_user_0014_scene_0006_cfg_0002_39_036422060215.jpg", + "images/task_0068_user_0014_scene_0006_cfg_0002_44_036422060215.jpg", + "images/task_0068_user_0014_scene_0006_cfg_0002_49_036422060215.jpg", + "images/task_0068_user_0014_scene_0006_cfg_0002_54_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0068", + "split": "eval", + "split_group_id": "task_0068_user_0014_scene_0006_cfg_0002", + "recording_id": "task_0068_user_0014_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0069_user_0007_scene_0005_cfg_0002_9_036422060215.jpg", + "images/task_0069_user_0007_scene_0005_cfg_0002_22_036422060215.jpg", + "images/task_0069_user_0007_scene_0005_cfg_0002_35_036422060215.jpg", + "images/task_0069_user_0007_scene_0005_cfg_0002_48_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0069_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. 
Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0069_user_0007_scene_0005_cfg_0002_22_036422060215.jpg", + "images/task_0069_user_0007_scene_0005_cfg_0002_35_036422060215.jpg", + "images/task_0069_user_0007_scene_0005_cfg_0002_48_036422060215.jpg", + "images/task_0069_user_0007_scene_0005_cfg_0002_61_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0069_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0069_user_0007_scene_0005_cfg_0002_35_036422060215.jpg", + "images/task_0069_user_0007_scene_0005_cfg_0002_48_036422060215.jpg", + "images/task_0069_user_0007_scene_0005_cfg_0002_61_036422060215.jpg", + "images/task_0069_user_0007_scene_0005_cfg_0002_74_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0069_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. 
Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0069_user_0007_scene_0005_cfg_0002_48_036422060215.jpg", + "images/task_0069_user_0007_scene_0005_cfg_0002_61_036422060215.jpg", + "images/task_0069_user_0007_scene_0005_cfg_0002_74_036422060215.jpg", + "images/task_0069_user_0007_scene_0005_cfg_0002_87_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0069_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0069_user_0007_scene_0005_cfg_0002_61_036422060215.jpg", + "images/task_0069_user_0007_scene_0005_cfg_0002_74_036422060215.jpg", + "images/task_0069_user_0007_scene_0005_cfg_0002_87_036422060215.jpg", + "images/task_0069_user_0007_scene_0005_cfg_0002_100_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0069_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. 
Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0069_user_0007_scene_0005_cfg_0002_87_036422060215.jpg", + "images/task_0069_user_0007_scene_0005_cfg_0002_100_036422060215.jpg", + "images/task_0069_user_0007_scene_0005_cfg_0002_113_036422060215.jpg", + "images/task_0069_user_0007_scene_0005_cfg_0002_126_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0069_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0069_user_0007_scene_0005_cfg_0002_100_036422060215.jpg", + "images/task_0069_user_0007_scene_0005_cfg_0002_113_036422060215.jpg", + "images/task_0069_user_0007_scene_0005_cfg_0002_126_036422060215.jpg", + "images/task_0069_user_0007_scene_0005_cfg_0002_139_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0069_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. 
Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0069_user_0007_scene_0005_cfg_0002_113_036422060215.jpg", + "images/task_0069_user_0007_scene_0005_cfg_0002_126_036422060215.jpg", + "images/task_0069_user_0007_scene_0005_cfg_0002_139_036422060215.jpg", + "images/task_0069_user_0007_scene_0005_cfg_0002_152_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0069_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0069_user_0007_scene_0005_cfg_0002_126_036422060215.jpg", + "images/task_0069_user_0007_scene_0005_cfg_0002_139_036422060215.jpg", + "images/task_0069_user_0007_scene_0005_cfg_0002_152_036422060215.jpg", + "images/task_0069_user_0007_scene_0005_cfg_0002_165_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0069_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. 
Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0069_user_0007_scene_0005_cfg_0002_139_036422060215.jpg", + "images/task_0069_user_0007_scene_0005_cfg_0002_152_036422060215.jpg", + "images/task_0069_user_0007_scene_0005_cfg_0002_165_036422060215.jpg", + "images/task_0069_user_0007_scene_0005_cfg_0002_178_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0069_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. release\nC. contact\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. pre-approach" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0001_cfg_0002_12_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. release\nC. approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
pre-approach" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0001_cfg_0002_30_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. contact\nC. approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. approach" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0001_cfg_0002_58_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. hold and carry\nC. transfer\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
transfer" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0001_cfg_0002_59_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. transfer\nC. release\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. approach" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0001_cfg_0002_94_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. approach\nC. transfer\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
contact" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0001_cfg_0002_112_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. contact\nC. hold and carry\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. hold and carry" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0001_cfg_0002_124_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. hold and carry\nC. approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
transfer" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0001_cfg_0002_157_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. approach\nC. transfer\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. release" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0001_cfg_0002_197_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. pre-approach\nC. transfer\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
contact" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0001_cfg_0002_278_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. approach\nC. hold and carry\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. hold and carry" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0001_cfg_0002_351_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. hold and carry\nC. release\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
release" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0001_cfg_0002_378_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. transfer\nC. contact\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. contact" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0001_cfg_0002_422_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. hold and carry\nC. pre-approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
approach" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0001_cfg_0002_444_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. pre-approach\nC. hold and carry\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. hold and carry" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0001_cfg_0002_528_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. contact\nC. pre-approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
release" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0001_cfg_0002_547_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. approach\nC. transfer\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. transfer" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0001_cfg_0002_550_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0001_cfg_0002_12_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. 
No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0001_cfg_0002_30_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0001_cfg_0002_58_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — no contact" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0001_cfg_0002_94_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0001_cfg_0002_112_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — contact established" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0001_cfg_0002_278_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0001_cfg_0002_351_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — contact established" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0001_cfg_0002_422_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0001_cfg_0002_444_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — contact established" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0001_cfg_0002_528_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Down\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Up" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0001_cfg_0002_40_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_50_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_60_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_70_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Up\nC. Left\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Up" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0001_cfg_0002_50_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_60_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_70_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_80_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Up" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0001_cfg_0002_60_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_70_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_80_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_90_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Left\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Down" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0001_cfg_0002_140_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_150_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_160_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_170_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Down\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Down" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0001_cfg_0002_150_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_160_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_170_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_180_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Down" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0001_cfg_0002_160_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_170_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_180_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_190_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Left\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Left" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0001_cfg_0002_290_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_300_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_310_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_320_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Left\nC. Down\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Left" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0001_cfg_0002_300_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_310_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_320_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_330_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Left\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Left" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0001_cfg_0002_380_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_390_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_400_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_410_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Left\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Right" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0001_cfg_0002_530_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_540_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_550_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_560_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Right\nC. Down\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Right" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0001_cfg_0002_540_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_550_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_560_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_570_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0001_cfg_0002_6_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_9_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_12_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_15_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_18_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0001_cfg_0002_24_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_27_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_30_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_33_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_36_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0001_cfg_0002_53_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_56_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_59_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_62_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_65_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0001_cfg_0002_151_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_154_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_157_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_160_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_163_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0001_cfg_0002_522_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_525_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_528_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_531_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_534_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0001_cfg_0002_13_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_32_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_51_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_70_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. 
No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0001_cfg_0002_51_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_70_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_89_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_108_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0001_cfg_0002_89_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_108_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_127_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_146_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. 
No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0001_cfg_0002_127_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_146_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_165_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_184_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0001_cfg_0002_165_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_184_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_203_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_222_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. 
No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0001_cfg_0002_203_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_222_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_241_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_260_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0001_cfg_0002_363_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_308_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0001_cfg_0002_308_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_163_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0001_cfg_0002_526_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_459_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0001_cfg_0002_163_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_95_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0001_cfg_0002_363_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_459_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0001_cfg_0002_96_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_99_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_102_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_105_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_108_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0001_cfg_0002_123_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_126_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_129_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_132_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_135_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0001_cfg_0002_150_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_153_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_156_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_159_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_162_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0001_cfg_0002_308_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_311_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_314_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_317_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_320_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0001_cfg_0002_330_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_333_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_336_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_339_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_342_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0001_cfg_0002_351_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_354_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_357_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_360_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_363_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0001_cfg_0002_460_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_463_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_466_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_469_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_472_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0001_cfg_0002_486_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_489_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_492_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_495_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_498_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0001_cfg_0002_513_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_516_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_519_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_522_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_525_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0001_cfg_0002_93_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_129_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_165_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0001_cfg_0002_365_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_336_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_306_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0001_cfg_0002_492_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_529_036422060215.jpg", + "images/task_0069_user_0014_scene_0001_cfg_0002_456_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. approach\nC. transfer\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
pre-approach" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_12_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. release\nC. transfer\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. pre-approach" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_30_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. transfer\nC. contact\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
approach" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_53_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. hold and carry\nC. transfer\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. transfer" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_53_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. contact\nC. hold and carry\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
release" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_102_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. contact\nC. pre-approach\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. approach" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_144_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. approach\nC. transfer\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
transfer" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_184_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. transfer\nC. contact\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. contact" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_300_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. pre-approach\nC. approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
hold and carry" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_308_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. pre-approach\nC. contact\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. release" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_323_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. approach\nC. release\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
contact" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_387_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. pre-approach\nC. release\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. approach" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_395_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. release\nC. hold and carry\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
contact" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_405_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. hold and carry\nC. pre-approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. hold and carry" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_464_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. hold and carry\nC. contact\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
hold and carry" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_528_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. pre-approach\nC. hold and carry\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. transfer" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_541_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. approach\nC. pre-approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
release" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_559_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_12_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_30_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. 
Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_53_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_144_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — contact established" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_300_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_387_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — no contact" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_395_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_405_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — contact established" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_464_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_528_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Up\nC. Left\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Down" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_20_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_30_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_40_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_50_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Right\nC. Down\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Down" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_30_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_40_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_50_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_60_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Right\nC. Left\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Down" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_40_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_50_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_60_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_70_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Down\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Up" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_60_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_70_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_80_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_90_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Up\nC. Down\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Up" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_70_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_80_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_90_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_100_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Left\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Up" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_80_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_90_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_100_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_110_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Left\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Left" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_160_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_170_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_180_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_190_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Right\nC. Down\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Right" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_520_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_530_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_540_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_550_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Left\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Right" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_530_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_540_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_550_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_560_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_6_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_9_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_12_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_15_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_18_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. 
Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_24_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_27_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_30_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_33_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_36_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_47_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_50_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_53_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_56_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_59_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_178_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_181_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_184_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_187_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_190_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_458_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_461_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_464_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_467_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_470_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_522_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_525_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_528_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_531_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_534_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_535_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_538_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_541_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_544_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_547_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_11_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_28_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_45_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_62_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. 
Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_45_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_62_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_79_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_96_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_79_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_96_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_113_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_130_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. 
Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_113_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_130_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_147_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_164_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_147_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_164_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_181_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_198_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. 
Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_181_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_198_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_215_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_232_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_433_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_348_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_516_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_433_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_160_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_189_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_97_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_68_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_97_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_160_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_348_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_317_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_317_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_189_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_65_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_68_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_71_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_74_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_77_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_76_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_79_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_82_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_85_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_88_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_88_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_91_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_94_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_97_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_100_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_157_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_160_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_163_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_166_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_169_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_168_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_171_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_174_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_177_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_180_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_180_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_183_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_186_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_189_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_192_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_314_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_317_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_320_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_323_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_326_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_326_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_329_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_332_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_335_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_338_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_339_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_342_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_345_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_348_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_351_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_435_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_438_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_441_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_444_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_447_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_468_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_471_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_474_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_477_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_480_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_502_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_505_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_508_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_511_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_514_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_67_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_82_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_98_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_174_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_190_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_159_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_349_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_332_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_316_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_0069_user_0014_scene_0002_cfg_0002_430_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_474_036422060215.jpg", + "images/task_0069_user_0014_scene_0002_cfg_0002_519_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0069", + "split": "eval", + "split_group_id": "task_0069_user_0014_scene_0002_cfg_0002", + "recording_id": "task_0069_user_0014_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0070_user_0014_scene_0005_cfg_0002_13_036422060215.jpg", + "images/task_0070_user_0014_scene_0005_cfg_0002_33_036422060215.jpg", + "images/task_0070_user_0014_scene_0005_cfg_0002_53_036422060215.jpg", + "images/task_0070_user_0014_scene_0005_cfg_0002_73_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0070", + "split": "eval", + "split_group_id": "task_0070_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0070_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0070_user_0014_scene_0005_cfg_0002_53_036422060215.jpg", + "images/task_0070_user_0014_scene_0005_cfg_0002_73_036422060215.jpg", + "images/task_0070_user_0014_scene_0005_cfg_0002_93_036422060215.jpg", + "images/task_0070_user_0014_scene_0005_cfg_0002_113_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0070", + "split": "eval", + "split_group_id": "task_0070_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0070_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0070_user_0014_scene_0005_cfg_0002_133_036422060215.jpg", + "images/task_0070_user_0014_scene_0005_cfg_0002_153_036422060215.jpg", + "images/task_0070_user_0014_scene_0005_cfg_0002_173_036422060215.jpg", + "images/task_0070_user_0014_scene_0005_cfg_0002_193_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0070", + "split": "eval", + "split_group_id": "task_0070_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0070_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0070_user_0014_scene_0005_cfg_0002_173_036422060215.jpg", + "images/task_0070_user_0014_scene_0005_cfg_0002_193_036422060215.jpg", + "images/task_0070_user_0014_scene_0005_cfg_0002_213_036422060215.jpg", + "images/task_0070_user_0014_scene_0005_cfg_0002_233_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0070", + "split": "eval", + "split_group_id": "task_0070_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0070_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0070_user_0014_scene_0005_cfg_0002_213_036422060215.jpg", + "images/task_0070_user_0014_scene_0005_cfg_0002_233_036422060215.jpg", + "images/task_0070_user_0014_scene_0005_cfg_0002_253_036422060215.jpg", + "images/task_0070_user_0014_scene_0005_cfg_0002_273_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0070", + "split": "eval", + "split_group_id": "task_0070_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0070_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_0070_user_0014_scene_0005_cfg_0002_220_036422060215.jpg", + "images/task_0070_user_0014_scene_0005_cfg_0002_176_036422060215.jpg", + "images/task_0070_user_0014_scene_0005_cfg_0002_198_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0070", + "split": "eval", + "split_group_id": "task_0070_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0070_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_0070_user_0014_scene_0005_cfg_0002_304_036422060215.jpg", + "images/task_0070_user_0014_scene_0005_cfg_0002_290_036422060215.jpg", + "images/task_0070_user_0014_scene_0005_cfg_0002_317_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0070", + "split": "eval", + "split_group_id": "task_0070_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0070_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_0070_user_0014_scene_0005_cfg_0002_594_036422060215.jpg", + "images/task_0070_user_0014_scene_0005_cfg_0002_619_036422060215.jpg", + "images/task_0070_user_0014_scene_0005_cfg_0002_570_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0070", + "split": "eval", + "split_group_id": "task_0070_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0070_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_0070_user_0014_scene_0005_cfg_0002_421_036422060215.jpg", + "images/task_0070_user_0014_scene_0005_cfg_0002_404_036422060215.jpg", + "images/task_0070_user_0014_scene_0005_cfg_0002_412_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0070", + "split": "eval", + "split_group_id": "task_0070_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0070_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. pre-approach\nC. transfer\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
pre-approach" + } + ], + "images": [ + "images/task_0072_user_0014_scene_0004_cfg_0002_12_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0072", + "split": "eval", + "split_group_id": "task_0072_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0072_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. pre-approach\nC. transfer\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. pre-approach" + } + ], + "images": [ + "images/task_0072_user_0014_scene_0004_cfg_0002_30_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0072", + "split": "eval", + "split_group_id": "task_0072_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0072_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. approach\nC. contact\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
pre-approach" + } + ], + "images": [ + "images/task_0072_user_0014_scene_0004_cfg_0002_48_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0072", + "split": "eval", + "split_group_id": "task_0072_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0072_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. contact\nC. approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. contact" + } + ], + "images": [ + "images/task_0072_user_0014_scene_0004_cfg_0002_68_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0072", + "split": "eval", + "split_group_id": "task_0072_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0072_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. contact\nC. approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
contact" + } + ], + "images": [ + "images/task_0072_user_0014_scene_0004_cfg_0002_86_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0072", + "split": "eval", + "split_group_id": "task_0072_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0072_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. transfer\nC. hold and carry\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. hold and carry" + } + ], + "images": [ + "images/task_0072_user_0014_scene_0004_cfg_0002_86_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0072", + "split": "eval", + "split_group_id": "task_0072_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0072_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. hold and carry\nC. release\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
hold and carry" + } + ], + "images": [ + "images/task_0072_user_0014_scene_0004_cfg_0002_122_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0072", + "split": "eval", + "split_group_id": "task_0072_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0072_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. hold and carry\nC. approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. transfer" + } + ], + "images": [ + "images/task_0072_user_0014_scene_0004_cfg_0002_153_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0072", + "split": "eval", + "split_group_id": "task_0072_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0072_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. contact\nC. transfer\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
transfer" + } + ], + "images": [ + "images/task_0072_user_0014_scene_0004_cfg_0002_192_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0072", + "split": "eval", + "split_group_id": "task_0072_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0072_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. hold and carry\nC. approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. hold and carry" + } + ], + "images": [ + "images/task_0072_user_0014_scene_0004_cfg_0002_217_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0072", + "split": "eval", + "split_group_id": "task_0072_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0072_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. hold and carry\nC. release\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
release" + } + ], + "images": [ + "images/task_0072_user_0014_scene_0004_cfg_0002_241_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0072", + "split": "eval", + "split_group_id": "task_0072_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0072_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. contact\nC. release\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. transfer" + } + ], + "images": [ + "images/task_0072_user_0014_scene_0004_cfg_0002_252_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0072", + "split": "eval", + "split_group_id": "task_0072_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0072_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. release\nC. transfer\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
release" + } + ], + "images": [ + "images/task_0072_user_0014_scene_0004_cfg_0002_259_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0072", + "split": "eval", + "split_group_id": "task_0072_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0072_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_0072_user_0014_scene_0004_cfg_0002_12_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0072", + "split": "eval", + "split_group_id": "task_0072_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0072_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_0072_user_0014_scene_0004_cfg_0002_30_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0072", + "split": "eval", + "split_group_id": "task_0072_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0072_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. 
Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/task_0072_user_0014_scene_0004_cfg_0002_48_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0072", + "split": "eval", + "split_group_id": "task_0072_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0072_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/task_0072_user_0014_scene_0004_cfg_0002_68_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0072", + "split": "eval", + "split_group_id": "task_0072_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0072_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — contact established" + } + ], + "images": [ + "images/task_0072_user_0014_scene_0004_cfg_0002_86_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0072", + "split": "eval", + "split_group_id": "task_0072_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0072_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/task_0072_user_0014_scene_0004_cfg_0002_122_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0072", + "split": "eval", + "split_group_id": "task_0072_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0072_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — contact established" + } + ], + "images": [ + "images/task_0072_user_0014_scene_0004_cfg_0002_217_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0072", + "split": "eval", + "split_group_id": "task_0072_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0072_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Right\nC. Left\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Up" + } + ], + "images": [ + "images/task_0072_user_0014_scene_0004_cfg_0002_40_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_50_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_60_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_70_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0072", + "split": "eval", + "split_group_id": "task_0072_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0072_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Left\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Up" + } + ], + "images": [ + "images/task_0072_user_0014_scene_0004_cfg_0002_50_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_60_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_70_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_80_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0072", + "split": "eval", + "split_group_id": "task_0072_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0072_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Right\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Up" + } + ], + "images": [ + "images/task_0072_user_0014_scene_0004_cfg_0002_60_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_70_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_80_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_90_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0072", + "split": "eval", + "split_group_id": "task_0072_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0072_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Down\nC. Left\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Left" + } + ], + "images": [ + "images/task_0072_user_0014_scene_0004_cfg_0002_80_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_90_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_100_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_110_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0072", + "split": "eval", + "split_group_id": "task_0072_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0072_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Right\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Left" + } + ], + "images": [ + "images/task_0072_user_0014_scene_0004_cfg_0002_90_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_100_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_110_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_120_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0072", + "split": "eval", + "split_group_id": "task_0072_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0072_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Up\nC. Down\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Left" + } + ], + "images": [ + "images/task_0072_user_0014_scene_0004_cfg_0002_100_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_110_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_120_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_130_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0072", + "split": "eval", + "split_group_id": "task_0072_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0072_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Left\nC. Right\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Down" + } + ], + "images": [ + "images/task_0072_user_0014_scene_0004_cfg_0002_130_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_140_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_150_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_160_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0072", + "split": "eval", + "split_group_id": "task_0072_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0072_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Down\nC. Left\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Down" + } + ], + "images": [ + "images/task_0072_user_0014_scene_0004_cfg_0002_140_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_150_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_160_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_170_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0072", + "split": "eval", + "split_group_id": "task_0072_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0072_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Left\nC. Down\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Down" + } + ], + "images": [ + "images/task_0072_user_0014_scene_0004_cfg_0002_150_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_160_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_170_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_180_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0072", + "split": "eval", + "split_group_id": "task_0072_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0072_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0072_user_0014_scene_0004_cfg_0002_6_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_9_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_12_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_15_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_18_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0072", + "split": "eval", + "split_group_id": "task_0072_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0072_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0072_user_0014_scene_0004_cfg_0002_42_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_45_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_48_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_51_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_54_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0072", + "split": "eval", + "split_group_id": "task_0072_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0072_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0072_user_0014_scene_0004_cfg_0002_147_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_150_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_153_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_156_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_159_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0072", + "split": "eval", + "split_group_id": "task_0072_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0072_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0072_user_0014_scene_0004_cfg_0002_186_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_189_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_192_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_195_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_198_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0072", + "split": "eval", + "split_group_id": "task_0072_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0072_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0072_user_0014_scene_0004_cfg_0002_246_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_249_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_252_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_255_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_258_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0072", + "split": "eval", + "split_group_id": "task_0072_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0072_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0072_user_0014_scene_0004_cfg_0002_6_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_14_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_22_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_30_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0072", + "split": "eval", + "split_group_id": "task_0072_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0072_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. 
Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0072_user_0014_scene_0004_cfg_0002_22_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_30_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_38_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_46_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0072", + "split": "eval", + "split_group_id": "task_0072_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0072_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0072_user_0014_scene_0004_cfg_0002_38_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_46_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_54_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_62_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0072", + "split": "eval", + "split_group_id": "task_0072_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0072_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. 
Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0072_user_0014_scene_0004_cfg_0002_54_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_62_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_70_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_78_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0072", + "split": "eval", + "split_group_id": "task_0072_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0072_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0072_user_0014_scene_0004_cfg_0002_70_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_78_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_86_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_94_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0072", + "split": "eval", + "split_group_id": "task_0072_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0072_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. 
No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0072_user_0014_scene_0004_cfg_0002_86_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_94_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_102_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_110_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0072", + "split": "eval", + "split_group_id": "task_0072_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0072_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0072_user_0014_scene_0004_cfg_0002_121_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_218_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0072", + "split": "eval", + "split_group_id": "task_0072_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0072_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. 
Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0072_user_0014_scene_0004_cfg_0002_125_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_128_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_131_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_134_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_137_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0072", + "split": "eval", + "split_group_id": "task_0072_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0072_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0072_user_0014_scene_0004_cfg_0002_164_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_167_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_170_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_173_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_176_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0072", + "split": "eval", + "split_group_id": "task_0072_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0072_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0072_user_0014_scene_0004_cfg_0002_202_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_205_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_208_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_211_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_214_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0072", + "split": "eval", + "split_group_id": "task_0072_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0072_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_0072_user_0014_scene_0004_cfg_0002_222_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_117_036422060215.jpg", + "images/task_0072_user_0014_scene_0004_cfg_0002_170_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0072", + "split": "eval", + "split_group_id": "task_0072_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0072_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. approach\nC. release\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
pre-approach" + } + ], + "images": [ + "images/task_0073_user_0014_scene_0008_cfg_0002_14_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0073", + "split": "eval", + "split_group_id": "task_0073_user_0014_scene_0008_cfg_0002", + "recording_id": "task_0073_user_0014_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. release\nC. hold and carry\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. pre-approach" + } + ], + "images": [ + "images/task_0073_user_0014_scene_0008_cfg_0002_32_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0073", + "split": "eval", + "split_group_id": "task_0073_user_0014_scene_0008_cfg_0002", + "recording_id": "task_0073_user_0014_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. transfer\nC. hold and carry\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
pre-approach" + } + ], + "images": [ + "images/task_0073_user_0014_scene_0008_cfg_0002_50_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0073", + "split": "eval", + "split_group_id": "task_0073_user_0014_scene_0008_cfg_0002", + "recording_id": "task_0073_user_0014_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. pre-approach\nC. hold and carry\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. contact" + } + ], + "images": [ + "images/task_0073_user_0014_scene_0008_cfg_0002_74_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0073", + "split": "eval", + "split_group_id": "task_0073_user_0014_scene_0008_cfg_0002", + "recording_id": "task_0073_user_0014_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. pre-approach\nC. release\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
hold and carry" + } + ], + "images": [ + "images/task_0073_user_0014_scene_0008_cfg_0002_82_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0073", + "split": "eval", + "split_group_id": "task_0073_user_0014_scene_0008_cfg_0002", + "recording_id": "task_0073_user_0014_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. pre-approach\nC. approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. approach" + } + ], + "images": [ + "images/task_0073_user_0014_scene_0008_cfg_0002_84_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0073", + "split": "eval", + "split_group_id": "task_0073_user_0014_scene_0008_cfg_0002", + "recording_id": "task_0073_user_0014_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. pre-approach\nC. release\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
hold and carry" + } + ], + "images": [ + "images/task_0073_user_0014_scene_0008_cfg_0002_103_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0073", + "split": "eval", + "split_group_id": "task_0073_user_0014_scene_0008_cfg_0002", + "recording_id": "task_0073_user_0014_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. transfer\nC. hold and carry\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. transfer" + } + ], + "images": [ + "images/task_0073_user_0014_scene_0008_cfg_0002_116_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0073", + "split": "eval", + "split_group_id": "task_0073_user_0014_scene_0008_cfg_0002", + "recording_id": "task_0073_user_0014_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. hold and carry\nC. contact\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
transfer" + } + ], + "images": [ + "images/task_0073_user_0014_scene_0008_cfg_0002_134_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0073", + "split": "eval", + "split_group_id": "task_0073_user_0014_scene_0008_cfg_0002", + "recording_id": "task_0073_user_0014_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. pre-approach\nC. hold and carry\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. release" + } + ], + "images": [ + "images/task_0073_user_0014_scene_0008_cfg_0002_148_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0073", + "split": "eval", + "split_group_id": "task_0073_user_0014_scene_0008_cfg_0002", + "recording_id": "task_0073_user_0014_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. contact\nC. hold and carry\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
approach" + } + ], + "images": [ + "images/task_0073_user_0014_scene_0008_cfg_0002_181_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0073", + "split": "eval", + "split_group_id": "task_0073_user_0014_scene_0008_cfg_0002", + "recording_id": "task_0073_user_0014_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. approach\nC. contact\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. transfer" + } + ], + "images": [ + "images/task_0073_user_0014_scene_0008_cfg_0002_181_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0073", + "split": "eval", + "split_group_id": "task_0073_user_0014_scene_0008_cfg_0002", + "recording_id": "task_0073_user_0014_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. approach\nC. release\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
approach" + } + ], + "images": [ + "images/task_0073_user_0014_scene_0008_cfg_0002_199_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0073", + "split": "eval", + "split_group_id": "task_0073_user_0014_scene_0008_cfg_0002", + "recording_id": "task_0073_user_0014_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. pre-approach\nC. hold and carry\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. contact" + } + ], + "images": [ + "images/task_0073_user_0014_scene_0008_cfg_0002_208_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0073", + "split": "eval", + "split_group_id": "task_0073_user_0014_scene_0008_cfg_0002", + "recording_id": "task_0073_user_0014_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. approach\nC. hold and carry\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
release" + } + ], + "images": [ + "images/task_0073_user_0014_scene_0008_cfg_0002_218_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0073", + "split": "eval", + "split_group_id": "task_0073_user_0014_scene_0008_cfg_0002", + "recording_id": "task_0073_user_0014_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. hold and carry\nC. contact\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. contact" + } + ], + "images": [ + "images/task_0073_user_0014_scene_0008_cfg_0002_262_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0073", + "split": "eval", + "split_group_id": "task_0073_user_0014_scene_0008_cfg_0002", + "recording_id": "task_0073_user_0014_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. hold and carry\nC. pre-approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
hold and carry" + } + ], + "images": [ + "images/task_0073_user_0014_scene_0008_cfg_0002_270_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0073", + "split": "eval", + "split_group_id": "task_0073_user_0014_scene_0008_cfg_0002", + "recording_id": "task_0073_user_0014_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. release\nC. approach\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. release" + } + ], + "images": [ + "images/task_0073_user_0014_scene_0008_cfg_0002_279_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0073", + "split": "eval", + "split_group_id": "task_0073_user_0014_scene_0008_cfg_0002", + "recording_id": "task_0073_user_0014_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_0073_user_0014_scene_0008_cfg_0002_14_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0073", + "split": "eval", + "split_group_id": "task_0073_user_0014_scene_0008_cfg_0002", + "recording_id": "task_0073_user_0014_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. 
Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/task_0073_user_0014_scene_0008_cfg_0002_32_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0073", + "split": "eval", + "split_group_id": "task_0073_user_0014_scene_0008_cfg_0002", + "recording_id": "task_0073_user_0014_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_0073_user_0014_scene_0008_cfg_0002_50_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0073", + "split": "eval", + "split_group_id": "task_0073_user_0014_scene_0008_cfg_0002", + "recording_id": "task_0073_user_0014_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — contact established" + } + ], + "images": [ + "images/task_0073_user_0014_scene_0008_cfg_0002_74_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0073", + "split": "eval", + "split_group_id": "task_0073_user_0014_scene_0008_cfg_0002", + "recording_id": "task_0073_user_0014_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_0073_user_0014_scene_0008_cfg_0002_84_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0073", + "split": "eval", + "split_group_id": "task_0073_user_0014_scene_0008_cfg_0002", + "recording_id": "task_0073_user_0014_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — contact established" + } + ], + "images": [ + "images/task_0073_user_0014_scene_0008_cfg_0002_103_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0073", + "split": "eval", + "split_group_id": "task_0073_user_0014_scene_0008_cfg_0002", + "recording_id": "task_0073_user_0014_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_0073_user_0014_scene_0008_cfg_0002_181_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0073", + "split": "eval", + "split_group_id": "task_0073_user_0014_scene_0008_cfg_0002", + "recording_id": "task_0073_user_0014_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — no contact" + } + ], + "images": [ + "images/task_0073_user_0014_scene_0008_cfg_0002_199_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0073", + "split": "eval", + "split_group_id": "task_0073_user_0014_scene_0008_cfg_0002", + "recording_id": "task_0073_user_0014_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/task_0073_user_0014_scene_0008_cfg_0002_208_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0073", + "split": "eval", + "split_group_id": "task_0073_user_0014_scene_0008_cfg_0002", + "recording_id": "task_0073_user_0014_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — contact established" + } + ], + "images": [ + "images/task_0073_user_0014_scene_0008_cfg_0002_262_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0073", + "split": "eval", + "split_group_id": "task_0073_user_0014_scene_0008_cfg_0002", + "recording_id": "task_0073_user_0014_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Left\nC. Down\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Down" + } + ], + "images": [ + "images/task_0073_user_0014_scene_0008_cfg_0002_40_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_50_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_60_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_70_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0073", + "split": "eval", + "split_group_id": "task_0073_user_0014_scene_0008_cfg_0002", + "recording_id": "task_0073_user_0014_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Left\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Down" + } + ], + "images": [ + "images/task_0073_user_0014_scene_0008_cfg_0002_50_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_60_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_70_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_80_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0073", + "split": "eval", + "split_group_id": "task_0073_user_0014_scene_0008_cfg_0002", + "recording_id": "task_0073_user_0014_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Right\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Down" + } + ], + "images": [ + "images/task_0073_user_0014_scene_0008_cfg_0002_60_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_70_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_80_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_90_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0073", + "split": "eval", + "split_group_id": "task_0073_user_0014_scene_0008_cfg_0002", + "recording_id": "task_0073_user_0014_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Up\nC. Down\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Up" + } + ], + "images": [ + "images/task_0073_user_0014_scene_0008_cfg_0002_100_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_110_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_120_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_130_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0073", + "split": "eval", + "split_group_id": "task_0073_user_0014_scene_0008_cfg_0002", + "recording_id": "task_0073_user_0014_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Left\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Up" + } + ], + "images": [ + "images/task_0073_user_0014_scene_0008_cfg_0002_110_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_120_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_130_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_140_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0073", + "split": "eval", + "split_group_id": "task_0073_user_0014_scene_0008_cfg_0002", + "recording_id": "task_0073_user_0014_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Down\nC. Left\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Right" + } + ], + "images": [ + "images/task_0073_user_0014_scene_0008_cfg_0002_120_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_130_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_140_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_150_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0073", + "split": "eval", + "split_group_id": "task_0073_user_0014_scene_0008_cfg_0002", + "recording_id": "task_0073_user_0014_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Left\nC. Down\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Left" + } + ], + "images": [ + "images/task_0073_user_0014_scene_0008_cfg_0002_150_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_160_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_170_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_180_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0073", + "split": "eval", + "split_group_id": "task_0073_user_0014_scene_0008_cfg_0002", + "recording_id": "task_0073_user_0014_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Right\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Left" + } + ], + "images": [ + "images/task_0073_user_0014_scene_0008_cfg_0002_160_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_170_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_180_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_190_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0073", + "split": "eval", + "split_group_id": "task_0073_user_0014_scene_0008_cfg_0002", + "recording_id": "task_0073_user_0014_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Left\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Left" + } + ], + "images": [ + "images/task_0073_user_0014_scene_0008_cfg_0002_170_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_180_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_190_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_200_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0073", + "split": "eval", + "split_group_id": "task_0073_user_0014_scene_0008_cfg_0002", + "recording_id": "task_0073_user_0014_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Left\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Right" + } + ], + "images": [ + "images/task_0073_user_0014_scene_0008_cfg_0002_190_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_200_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_210_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_220_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0073", + "split": "eval", + "split_group_id": "task_0073_user_0014_scene_0008_cfg_0002", + "recording_id": "task_0073_user_0014_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Right\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Right" + } + ], + "images": [ + "images/task_0073_user_0014_scene_0008_cfg_0002_200_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_210_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_220_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_230_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0073", + "split": "eval", + "split_group_id": "task_0073_user_0014_scene_0008_cfg_0002", + "recording_id": "task_0073_user_0014_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Up" + } + ], + "images": [ + "images/task_0073_user_0014_scene_0008_cfg_0002_250_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_260_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_270_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_280_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0073", + "split": "eval", + "split_group_id": "task_0073_user_0014_scene_0008_cfg_0002", + "recording_id": "task_0073_user_0014_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0073_user_0014_scene_0008_cfg_0002_8_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_11_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_14_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_17_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_20_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0073", + "split": "eval", + "split_group_id": "task_0073_user_0014_scene_0008_cfg_0002", + "recording_id": "task_0073_user_0014_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. 
Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0073_user_0014_scene_0008_cfg_0002_110_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_113_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_116_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_119_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_122_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0073", + "split": "eval", + "split_group_id": "task_0073_user_0014_scene_0008_cfg_0002", + "recording_id": "task_0073_user_0014_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0073_user_0014_scene_0008_cfg_0002_128_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_131_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_134_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_137_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_140_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0073", + "split": "eval", + "split_group_id": "task_0073_user_0014_scene_0008_cfg_0002", + "recording_id": "task_0073_user_0014_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0073_user_0014_scene_0008_cfg_0002_6_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_15_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_24_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_33_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0073", + "split": "eval", + "split_group_id": "task_0073_user_0014_scene_0008_cfg_0002", + "recording_id": "task_0073_user_0014_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. 
No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0073_user_0014_scene_0008_cfg_0002_24_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_33_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_42_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_51_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0073", + "split": "eval", + "split_group_id": "task_0073_user_0014_scene_0008_cfg_0002", + "recording_id": "task_0073_user_0014_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0073_user_0014_scene_0008_cfg_0002_42_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_51_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_60_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_69_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0073", + "split": "eval", + "split_group_id": "task_0073_user_0014_scene_0008_cfg_0002", + "recording_id": "task_0073_user_0014_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. 
No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0073_user_0014_scene_0008_cfg_0002_60_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_69_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_78_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_87_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0073", + "split": "eval", + "split_group_id": "task_0073_user_0014_scene_0008_cfg_0002", + "recording_id": "task_0073_user_0014_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0073_user_0014_scene_0008_cfg_0002_78_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_87_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_96_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_105_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0073", + "split": "eval", + "split_group_id": "task_0073_user_0014_scene_0008_cfg_0002", + "recording_id": "task_0073_user_0014_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. 
Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0073_user_0014_scene_0008_cfg_0002_96_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_105_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_114_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_123_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0073", + "split": "eval", + "split_group_id": "task_0073_user_0014_scene_0008_cfg_0002", + "recording_id": "task_0073_user_0014_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0073_user_0014_scene_0008_cfg_0002_136_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_193_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0073", + "split": "eval", + "split_group_id": "task_0073_user_0014_scene_0008_cfg_0002", + "recording_id": "task_0073_user_0014_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0073_user_0014_scene_0008_cfg_0002_281_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_270_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0073", + "split": "eval", + "split_group_id": "task_0073_user_0014_scene_0008_cfg_0002", + "recording_id": "task_0073_user_0014_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0073_user_0014_scene_0008_cfg_0002_216_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_270_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0073", + "split": "eval", + "split_group_id": "task_0073_user_0014_scene_0008_cfg_0002", + "recording_id": "task_0073_user_0014_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0073_user_0014_scene_0008_cfg_0002_136_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_96_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0073", + "split": "eval", + "split_group_id": "task_0073_user_0014_scene_0008_cfg_0002", + "recording_id": "task_0073_user_0014_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0073_user_0014_scene_0008_cfg_0002_193_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_216_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0073", + "split": "eval", + "split_group_id": "task_0073_user_0014_scene_0008_cfg_0002", + "recording_id": "task_0073_user_0014_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0073_user_0014_scene_0008_cfg_0002_94_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_97_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_100_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_103_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_106_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0073", + "split": "eval", + "split_group_id": "task_0073_user_0014_scene_0008_cfg_0002", + "recording_id": "task_0073_user_0014_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0073_user_0014_scene_0008_cfg_0002_110_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_113_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_116_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_119_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_122_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0073", + "split": "eval", + "split_group_id": "task_0073_user_0014_scene_0008_cfg_0002", + "recording_id": "task_0073_user_0014_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0073_user_0014_scene_0008_cfg_0002_126_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_129_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_132_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_135_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_138_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0073", + "split": "eval", + "split_group_id": "task_0073_user_0014_scene_0008_cfg_0002", + "recording_id": "task_0073_user_0014_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0073_user_0014_scene_0008_cfg_0002_190_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_193_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_196_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_199_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_202_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0073", + "split": "eval", + "split_group_id": "task_0073_user_0014_scene_0008_cfg_0002", + "recording_id": "task_0073_user_0014_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0073_user_0014_scene_0008_cfg_0002_198_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_201_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_204_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_207_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_210_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0073", + "split": "eval", + "split_group_id": "task_0073_user_0014_scene_0008_cfg_0002", + "recording_id": "task_0073_user_0014_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0073_user_0014_scene_0008_cfg_0002_207_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_210_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_213_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_216_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_219_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0073", + "split": "eval", + "split_group_id": "task_0073_user_0014_scene_0008_cfg_0002", + "recording_id": "task_0073_user_0014_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0073_user_0014_scene_0008_cfg_0002_265_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_268_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_271_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_274_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_277_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0073", + "split": "eval", + "split_group_id": "task_0073_user_0014_scene_0008_cfg_0002", + "recording_id": "task_0073_user_0014_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0073_user_0014_scene_0008_cfg_0002_270_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_273_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_276_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_279_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_282_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0073", + "split": "eval", + "split_group_id": "task_0073_user_0014_scene_0008_cfg_0002", + "recording_id": "task_0073_user_0014_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0073_user_0014_scene_0008_cfg_0002_274_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_277_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_280_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_283_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_286_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0073", + "split": "eval", + "split_group_id": "task_0073_user_0014_scene_0008_cfg_0002", + "recording_id": "task_0073_user_0014_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_0073_user_0014_scene_0008_cfg_0002_116_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_138_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_94_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0073", + "split": "eval", + "split_group_id": "task_0073_user_0014_scene_0008_cfg_0002", + "recording_id": "task_0073_user_0014_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_0073_user_0014_scene_0008_cfg_0002_216_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_204_036422060215.jpg", + "images/task_0073_user_0014_scene_0008_cfg_0002_193_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0073", + "split": "eval", + "split_group_id": "task_0073_user_0014_scene_0008_cfg_0002", + "recording_id": "task_0073_user_0014_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. pre-approach\nC. approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. pre-approach" + } + ], + "images": [ + "images/task_0075_user_0014_scene_0005_cfg_0002_51_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0075", + "split": "eval", + "split_group_id": "task_0075_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0075_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. contact\nC. approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
pre-approach" + } + ], + "images": [ + "images/task_0075_user_0014_scene_0005_cfg_0002_74_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0075", + "split": "eval", + "split_group_id": "task_0075_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0075_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. release\nC. pre-approach\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. pre-approach" + } + ], + "images": [ + "images/task_0075_user_0014_scene_0005_cfg_0002_92_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0075", + "split": "eval", + "split_group_id": "task_0075_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0075_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. hold and carry\nC. approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
transfer" + } + ], + "images": [ + "images/task_0075_user_0014_scene_0005_cfg_0002_111_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0075", + "split": "eval", + "split_group_id": "task_0075_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0075_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. transfer\nC. contact\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. approach" + } + ], + "images": [ + "images/task_0075_user_0014_scene_0005_cfg_0002_111_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0075", + "split": "eval", + "split_group_id": "task_0075_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0075_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. contact\nC. approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
contact" + } + ], + "images": [ + "images/task_0075_user_0014_scene_0005_cfg_0002_112_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0075", + "split": "eval", + "split_group_id": "task_0075_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0075_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. pre-approach\nC. transfer\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. transfer" + } + ], + "images": [ + "images/task_0075_user_0014_scene_0005_cfg_0002_147_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0075", + "split": "eval", + "split_group_id": "task_0075_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0075_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. release\nC. pre-approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
release" + } + ], + "images": [ + "images/task_0075_user_0014_scene_0005_cfg_0002_150_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0075", + "split": "eval", + "split_group_id": "task_0075_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0075_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. approach\nC. transfer\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. approach" + } + ], + "images": [ + "images/task_0075_user_0014_scene_0005_cfg_0002_188_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0075", + "split": "eval", + "split_group_id": "task_0075_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0075_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. hold and carry\nC. transfer\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
contact" + } + ], + "images": [ + "images/task_0075_user_0014_scene_0005_cfg_0002_196_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0075", + "split": "eval", + "split_group_id": "task_0075_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0075_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. contact\nC. transfer\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. hold and carry" + } + ], + "images": [ + "images/task_0075_user_0014_scene_0005_cfg_0002_204_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0075", + "split": "eval", + "split_group_id": "task_0075_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0075_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. approach\nC. contact\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
transfer" + } + ], + "images": [ + "images/task_0075_user_0014_scene_0005_cfg_0002_220_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0075", + "split": "eval", + "split_group_id": "task_0075_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0075_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. release\nC. hold and carry\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. hold and carry" + } + ], + "images": [ + "images/task_0075_user_0014_scene_0005_cfg_0002_248_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0075", + "split": "eval", + "split_group_id": "task_0075_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0075_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. transfer\nC. pre-approach\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
hold and carry" + } + ], + "images": [ + "images/task_0075_user_0014_scene_0005_cfg_0002_275_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0075", + "split": "eval", + "split_group_id": "task_0075_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0075_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. approach\nC. release\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. release" + } + ], + "images": [ + "images/task_0075_user_0014_scene_0005_cfg_0002_288_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0075", + "split": "eval", + "split_group_id": "task_0075_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0075_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/task_0075_user_0014_scene_0005_cfg_0002_51_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0075", + "split": "eval", + "split_group_id": "task_0075_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0075_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. 
No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_0075_user_0014_scene_0005_cfg_0002_74_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0075", + "split": "eval", + "split_group_id": "task_0075_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0075_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_0075_user_0014_scene_0005_cfg_0002_92_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0075", + "split": "eval", + "split_group_id": "task_0075_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0075_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — no contact" + } + ], + "images": [ + "images/task_0075_user_0014_scene_0005_cfg_0002_111_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0075", + "split": "eval", + "split_group_id": "task_0075_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0075_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/task_0075_user_0014_scene_0005_cfg_0002_112_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0075", + "split": "eval", + "split_group_id": "task_0075_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0075_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — no contact" + } + ], + "images": [ + "images/task_0075_user_0014_scene_0005_cfg_0002_188_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0075", + "split": "eval", + "split_group_id": "task_0075_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0075_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/task_0075_user_0014_scene_0005_cfg_0002_196_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0075", + "split": "eval", + "split_group_id": "task_0075_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0075_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — contact established" + } + ], + "images": [ + "images/task_0075_user_0014_scene_0005_cfg_0002_248_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0075", + "split": "eval", + "split_group_id": "task_0075_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0075_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/task_0075_user_0014_scene_0005_cfg_0002_275_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0075", + "split": "eval", + "split_group_id": "task_0075_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0075_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Left\nC. Up\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Left" + } + ], + "images": [ + "images/task_0075_user_0014_scene_0005_cfg_0002_40_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_50_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_60_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_70_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0075", + "split": "eval", + "split_group_id": "task_0075_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0075_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Down\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Left" + } + ], + "images": [ + "images/task_0075_user_0014_scene_0005_cfg_0002_50_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_60_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_70_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_80_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0075", + "split": "eval", + "split_group_id": "task_0075_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0075_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Down\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Left" + } + ], + "images": [ + "images/task_0075_user_0014_scene_0005_cfg_0002_60_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_70_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_80_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_90_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0075", + "split": "eval", + "split_group_id": "task_0075_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0075_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Right\nC. Left\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Up" + } + ], + "images": [ + "images/task_0075_user_0014_scene_0005_cfg_0002_90_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_100_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_110_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_120_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0075", + "split": "eval", + "split_group_id": "task_0075_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0075_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Down\nC. Up\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Right" + } + ], + "images": [ + "images/task_0075_user_0014_scene_0005_cfg_0002_110_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_120_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_130_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_140_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0075", + "split": "eval", + "split_group_id": "task_0075_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0075_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Left\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Right" + } + ], + "images": [ + "images/task_0075_user_0014_scene_0005_cfg_0002_120_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_130_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_140_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_150_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0075", + "split": "eval", + "split_group_id": "task_0075_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0075_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Right" + } + ], + "images": [ + "images/task_0075_user_0014_scene_0005_cfg_0002_130_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_140_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_150_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_160_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0075", + "split": "eval", + "split_group_id": "task_0075_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0075_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Right\nC. Left\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Up" + } + ], + "images": [ + "images/task_0075_user_0014_scene_0005_cfg_0002_160_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_170_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_180_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_190_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0075", + "split": "eval", + "split_group_id": "task_0075_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0075_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Right\nC. Down\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Down" + } + ], + "images": [ + "images/task_0075_user_0014_scene_0005_cfg_0002_220_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_230_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_240_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_250_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0075", + "split": "eval", + "split_group_id": "task_0075_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0075_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0075_user_0014_scene_0005_cfg_0002_45_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_48_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_51_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_54_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_57_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0075", + "split": "eval", + "split_group_id": "task_0075_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0075_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. 
Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0075_user_0014_scene_0005_cfg_0002_86_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_89_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_92_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_95_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_98_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0075", + "split": "eval", + "split_group_id": "task_0075_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0075_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0075_user_0014_scene_0005_cfg_0002_105_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_108_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_111_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_114_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_117_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0075", + "split": "eval", + "split_group_id": "task_0075_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0075_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0075_user_0014_scene_0005_cfg_0002_141_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_144_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_147_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_150_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_153_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0075", + "split": "eval", + "split_group_id": "task_0075_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0075_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0075_user_0014_scene_0005_cfg_0002_214_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_217_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_220_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_223_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_226_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0075", + "split": "eval", + "split_group_id": "task_0075_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0075_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0075_user_0014_scene_0005_cfg_0002_6_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_15_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_24_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_33_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0075", + "split": "eval", + "split_group_id": "task_0075_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0075_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. 
No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0075_user_0014_scene_0005_cfg_0002_24_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_33_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_42_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_51_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0075", + "split": "eval", + "split_group_id": "task_0075_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0075_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0075_user_0014_scene_0005_cfg_0002_42_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_51_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_60_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_69_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0075", + "split": "eval", + "split_group_id": "task_0075_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0075_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. 
No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0075_user_0014_scene_0005_cfg_0002_60_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_69_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_78_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_87_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0075", + "split": "eval", + "split_group_id": "task_0075_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0075_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0075_user_0014_scene_0005_cfg_0002_78_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_87_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_96_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_105_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0075", + "split": "eval", + "split_group_id": "task_0075_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0075_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. 
Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0075_user_0014_scene_0005_cfg_0002_96_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_105_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_114_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_123_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0075", + "split": "eval", + "split_group_id": "task_0075_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0075_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0075_user_0014_scene_0005_cfg_0002_147_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_124_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0075", + "split": "eval", + "split_group_id": "task_0075_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0075_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0075_user_0014_scene_0005_cfg_0002_217_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_269_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0075", + "split": "eval", + "split_group_id": "task_0075_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0075_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0075_user_0014_scene_0005_cfg_0002_217_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_147_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0075", + "split": "eval", + "split_group_id": "task_0075_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0075_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0075_user_0014_scene_0005_cfg_0002_120_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_123_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_126_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_129_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_132_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0075", + "split": "eval", + "split_group_id": "task_0075_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0075_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0075_user_0014_scene_0005_cfg_0002_130_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_133_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_136_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_139_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_142_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0075", + "split": "eval", + "split_group_id": "task_0075_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0075_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0075_user_0014_scene_0005_cfg_0002_139_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_142_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_145_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_148_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_151_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0075", + "split": "eval", + "split_group_id": "task_0075_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0075_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0075_user_0014_scene_0005_cfg_0002_216_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_219_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_222_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_225_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_228_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0075", + "split": "eval", + "split_group_id": "task_0075_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0075_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0075_user_0014_scene_0005_cfg_0002_237_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_240_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_243_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_246_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_249_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0075", + "split": "eval", + "split_group_id": "task_0075_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0075_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0075_user_0014_scene_0005_cfg_0002_258_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_261_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_264_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_267_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_270_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0075", + "split": "eval", + "split_group_id": "task_0075_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0075_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_0075_user_0014_scene_0005_cfg_0002_136_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_123_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_148_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0075", + "split": "eval", + "split_group_id": "task_0075_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0075_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_0075_user_0014_scene_0005_cfg_0002_271_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_243_036422060215.jpg", + "images/task_0075_user_0014_scene_0005_cfg_0002_215_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0075", + "split": "eval", + "split_group_id": "task_0075_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0075_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0076_user_0007_scene_0001_cfg_0002_16_036422060215.jpg", + "images/task_0076_user_0007_scene_0001_cfg_0002_40_036422060215.jpg", + "images/task_0076_user_0007_scene_0001_cfg_0002_64_036422060215.jpg", + "images/task_0076_user_0007_scene_0001_cfg_0002_88_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0076_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0076_user_0007_scene_0001_cfg_0002_40_036422060215.jpg", + "images/task_0076_user_0007_scene_0001_cfg_0002_64_036422060215.jpg", + "images/task_0076_user_0007_scene_0001_cfg_0002_88_036422060215.jpg", + "images/task_0076_user_0007_scene_0001_cfg_0002_112_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0076_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0076_user_0007_scene_0001_cfg_0002_64_036422060215.jpg", + "images/task_0076_user_0007_scene_0001_cfg_0002_88_036422060215.jpg", + "images/task_0076_user_0007_scene_0001_cfg_0002_112_036422060215.jpg", + "images/task_0076_user_0007_scene_0001_cfg_0002_136_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0076_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0076_user_0007_scene_0001_cfg_0002_88_036422060215.jpg", + "images/task_0076_user_0007_scene_0001_cfg_0002_112_036422060215.jpg", + "images/task_0076_user_0007_scene_0001_cfg_0002_136_036422060215.jpg", + "images/task_0076_user_0007_scene_0001_cfg_0002_160_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0076_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0076_user_0007_scene_0001_cfg_0002_112_036422060215.jpg", + "images/task_0076_user_0007_scene_0001_cfg_0002_136_036422060215.jpg", + "images/task_0076_user_0007_scene_0001_cfg_0002_160_036422060215.jpg", + "images/task_0076_user_0007_scene_0001_cfg_0002_184_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0076_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0076_user_0007_scene_0001_cfg_0002_160_036422060215.jpg", + "images/task_0076_user_0007_scene_0001_cfg_0002_184_036422060215.jpg", + "images/task_0076_user_0007_scene_0001_cfg_0002_208_036422060215.jpg", + "images/task_0076_user_0007_scene_0001_cfg_0002_232_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0076_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0076_user_0007_scene_0001_cfg_0002_184_036422060215.jpg", + "images/task_0076_user_0007_scene_0001_cfg_0002_208_036422060215.jpg", + "images/task_0076_user_0007_scene_0001_cfg_0002_232_036422060215.jpg", + "images/task_0076_user_0007_scene_0001_cfg_0002_256_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0076_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0076_user_0007_scene_0001_cfg_0002_208_036422060215.jpg", + "images/task_0076_user_0007_scene_0001_cfg_0002_232_036422060215.jpg", + "images/task_0076_user_0007_scene_0001_cfg_0002_256_036422060215.jpg", + "images/task_0076_user_0007_scene_0001_cfg_0002_280_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0076_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0076_user_0007_scene_0001_cfg_0002_232_036422060215.jpg", + "images/task_0076_user_0007_scene_0001_cfg_0002_256_036422060215.jpg", + "images/task_0076_user_0007_scene_0001_cfg_0002_280_036422060215.jpg", + "images/task_0076_user_0007_scene_0001_cfg_0002_304_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0076_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0076_user_0007_scene_0001_cfg_0002_256_036422060215.jpg", + "images/task_0076_user_0007_scene_0001_cfg_0002_280_036422060215.jpg", + "images/task_0076_user_0007_scene_0001_cfg_0002_304_036422060215.jpg", + "images/task_0076_user_0007_scene_0001_cfg_0002_328_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0076_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. contact\nC. hold and carry\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. pre-approach" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_12_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. transfer\nC. release\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
pre-approach" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_30_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. release\nC. approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. approach" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_63_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. pre-approach\nC. hold and carry\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
transfer" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_63_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. contact\nC. release\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. release" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_137_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. approach\nC. contact\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
contact" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_189_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. contact\nC. hold and carry\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. hold and carry" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_227_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. pre-approach\nC. release\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
approach" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_369_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. pre-approach\nC. transfer\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. transfer" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_405_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. approach\nC. hold and carry\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
release" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_574_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. approach\nC. pre-approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. approach" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_703_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. approach\nC. hold and carry\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
contact" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_708_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. hold and carry\nC. contact\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. hold and carry" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_763_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. release\nC. hold and carry\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
contact" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_1156_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. pre-approach\nC. release\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. hold and carry" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_1164_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. transfer\nC. hold and carry\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
transfer" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_1179_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. release\nC. pre-approach\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. release" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_1181_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_12_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. 
No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_30_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_63_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — contact established" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_189_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_227_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — no contact" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_369_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_703_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — contact established" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_708_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_763_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — contact established" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_1156_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Up" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_40_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_50_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_60_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_70_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Up" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_50_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_60_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_70_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_80_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Right\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Up" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_60_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_70_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_80_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_90_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Up\nC. Down\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Right" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_90_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_100_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_110_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_120_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Right" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_100_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_110_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_120_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_130_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Left\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Right" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_110_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_120_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_130_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_140_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Left" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_170_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_180_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_190_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_200_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Left\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Down" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_190_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_200_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_210_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_220_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Down\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Down" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_200_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_210_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_220_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_230_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Up\nC. Left\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Left" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_410_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_420_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_430_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_440_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Right\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Left" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_420_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_430_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_440_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_450_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Left\nC. Down\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Down" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_500_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_510_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_520_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_530_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_6_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_9_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_12_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_15_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_18_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. 
Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_24_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_27_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_30_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_33_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_36_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_57_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_60_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_63_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_66_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_69_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_399_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_402_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_405_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_408_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_411_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_1158_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_1161_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_1164_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_1167_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_1170_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_1173_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_1176_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_1179_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_1182_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_1185_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_24_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_54_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_84_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_114_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. 
No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_96_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_126_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_156_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_186_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_168_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_198_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_228_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_258_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. 
No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_240_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_270_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_300_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_330_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_384_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_414_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_444_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_474_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_239_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_390_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_1065_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_1166_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_207_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_239_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_502_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_429_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_523_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_502_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_952_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_998_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_577_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_734_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_1182_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_1166_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_566_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_577_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_85_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_125_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_789_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_734_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_952_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_789_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_566_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_523_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_207_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_125_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_390_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_429_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_998_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_1056_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_83_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_86_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_89_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_92_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_95_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_99_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_102_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_105_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_108_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_111_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_115_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_118_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_121_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_124_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_127_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_204_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_207_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_210_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_213_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_216_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_217_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_220_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_223_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_226_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_229_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_230_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_233_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_236_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_239_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_242_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_388_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_391_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_394_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_397_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_400_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_404_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_407_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_410_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_413_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_416_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_419_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_422_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_425_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_428_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_431_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_498_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_501_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_504_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_507_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_510_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_506_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_509_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_512_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_515_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_518_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_515_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_518_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_521_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_524_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_527_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_561_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_564_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_567_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_570_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_573_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_566_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_569_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_572_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_575_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_578_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_570_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_573_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_576_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_579_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_582_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_733_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_736_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_739_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_742_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_745_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_756_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_759_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_762_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_765_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_768_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_778_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_781_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_784_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_787_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_790_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_951_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_954_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_957_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_960_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_963_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_969_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_972_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_975_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_978_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_981_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_987_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_990_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_993_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_996_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_999_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_1051_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_1054_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_1057_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_1060_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_1063_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_1054_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_1057_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_1060_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_1063_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_1066_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_1058_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_1061_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_1064_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_1067_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_1070_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_1161_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_1164_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_1167_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_1170_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_1173_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_1168_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_1171_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_1174_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_1177_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_1180_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_1175_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_1178_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_1181_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_1184_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_1187_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_1183_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_1165_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_1174_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_127_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_105_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_83_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_732_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_791_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_762_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_975_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_999_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_951_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_501_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_512_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_524_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_223_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_206_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_240_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0004_cfg_0002_430_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_410_036422060215.jpg", + "images/task_0076_user_0014_scene_0004_cfg_0002_389_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0004_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. contact\nC. hold and carry\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
pre-approach" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_45_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. approach\nC. pre-approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. pre-approach" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_81_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. hold and carry\nC. release\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
approach" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_105_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. pre-approach\nC. transfer\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. transfer" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_105_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. transfer\nC. approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
contact" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_149_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. hold and carry\nC. pre-approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. hold and carry" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_157_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. approach\nC. release\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
release" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_259_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. approach\nC. contact\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. approach" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_516_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. approach\nC. transfer\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
transfer" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_827_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. pre-approach\nC. release\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. contact" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_948_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. transfer\nC. approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
hold and carry" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_956_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. approach\nC. transfer\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. release" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_970_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. approach\nC. transfer\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
approach" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_1518_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. release\nC. contact\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. hold and carry" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_1699_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. pre-approach\nC. hold and carry\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
contact" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_1765_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. hold and carry\nC. pre-approach\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. transfer" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_1773_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. release\nC. approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
release" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_1786_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_45_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_81_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. 
No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_105_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_149_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — no contact" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_516_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_948_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — no contact" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_1518_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_1699_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — contact established" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_1765_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Down\nC. Left\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Up" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_40_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_50_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_60_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_70_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Left\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Up" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_50_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_60_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_70_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_80_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Right\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Up" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_60_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_70_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_80_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_90_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Right\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Right" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_110_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_120_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_130_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_140_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Left\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Right" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_120_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_130_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_140_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_150_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Right\nC. Left\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Left" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_210_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_220_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_230_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_240_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Down\nC. Right\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Left" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_250_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_260_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_270_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_280_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Right\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Down" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_510_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_520_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_530_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_540_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Right\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Down" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_520_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_530_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_540_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_550_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Up\nC. Down\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Down" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_800_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_810_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_820_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_830_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Right\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Left" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_1410_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1420_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1430_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1440_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_39_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_42_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_45_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_48_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_51_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_99_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_102_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_105_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_108_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_111_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_821_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_824_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_827_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_830_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_833_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_1693_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1696_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1699_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1702_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1705_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_1767_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1770_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1773_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1776_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1779_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_36_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_66_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_96_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_126_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. 
No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_146_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_176_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_206_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_236_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_256_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_286_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_316_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_346_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. 
Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_421_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_451_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_481_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_511_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_531_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_561_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_591_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_621_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. 
Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_641_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_671_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_701_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_731_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_721_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_829_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_961_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_862_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_1168_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_984_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_526_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_485_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_227_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_335_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_862_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_829_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_227_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_147_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_642_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_611_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_1256_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1406_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_358_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_485_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_701_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_721_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_701_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_642_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_335_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_358_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_1536_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1421_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_1772_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1649_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_1421_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1406_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_1536_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1649_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_961_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_984_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_1782_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1772_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_526_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_611_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_1168_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1256_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_150_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_153_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_156_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_159_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_162_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_181_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_184_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_187_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_190_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_193_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_212_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_215_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_218_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_221_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_224_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_332_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_335_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_338_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_341_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_344_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_340_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_343_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_346_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_349_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_352_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_349_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_352_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_355_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_358_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_361_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_483_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_486_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_489_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_492_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_495_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_500_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_503_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_506_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_509_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_512_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_516_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_519_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_522_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_525_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_528_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_608_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_611_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_614_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_617_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_620_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_620_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_623_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_626_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_629_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_632_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_633_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_636_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_639_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_642_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_645_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_698_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_701_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_704_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_707_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_710_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_705_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_708_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_711_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_714_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_717_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_712_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_715_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_718_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_721_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_724_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_826_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_829_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_832_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_835_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_838_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_840_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_843_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_846_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_849_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_852_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_853_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_856_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_859_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_862_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_865_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_957_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_960_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_963_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_966_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_969_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_966_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_969_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_972_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_975_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_978_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_976_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_979_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_982_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_985_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_988_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_1171_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1174_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1177_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1180_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1183_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_1206_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1209_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1212_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1215_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1218_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_1241_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1244_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1247_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1250_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1253_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_1401_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1404_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1407_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1410_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1413_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_1408_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1411_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1414_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1417_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1420_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_1414_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1417_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1420_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1423_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1426_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_1542_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1545_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1548_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1551_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1554_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_1586_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1589_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1592_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1595_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1598_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_1631_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1634_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1637_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1640_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1643_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_1767_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1770_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1773_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1776_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1779_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_1771_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1774_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1777_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1780_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1783_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_1775_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1778_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1781_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1784_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1787_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_863_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_846_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_828_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_187_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_145_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_229_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_506_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_483_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_528_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_985_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_960_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_972_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_358_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_335_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_346_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_1533_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1592_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1652_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_643_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_610_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_626_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_1165_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1212_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1259_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_1422_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1414_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_1406_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0007_cfg_0002_721_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_711_036422060215.jpg", + "images/task_0076_user_0014_scene_0007_cfg_0002_701_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0007_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. contact\nC. hold and carry\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. pre-approach" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_12_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. approach\nC. pre-approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
pre-approach" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_30_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. pre-approach\nC. approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. approach" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_52_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. approach\nC. transfer\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
transfer" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_52_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. contact\nC. release\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. contact" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_86_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. hold and carry\nC. pre-approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
hold and carry" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_114_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. approach\nC. release\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. release" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_182_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. hold and carry\nC. contact\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
contact" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_1414_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. hold and carry\nC. contact\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. hold and carry" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_1422_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. transfer\nC. pre-approach\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
approach" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_1424_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. pre-approach\nC. approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. transfer" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_1435_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. hold and carry\nC. transfer\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
release" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_1505_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. release\nC. pre-approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. approach" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_2455_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. release\nC. hold and carry\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
contact" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_2796_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. approach\nC. contact\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. hold and carry" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_2822_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. approach\nC. contact\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
transfer" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_2871_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. approach\nC. contact\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. release" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_2876_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_12_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. 
No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_30_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_52_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — contact established" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_86_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_114_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — contact established" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_1414_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_1424_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — no contact" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_2455_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_2796_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — contact established" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_2822_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Right" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_70_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_80_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_90_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_100_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Left\nC. Up\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Right" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_80_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_90_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_100_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_110_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Down\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Right" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_90_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_100_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_110_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_120_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Left" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_170_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_180_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_190_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_200_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Left\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Left" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_180_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_190_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_200_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_210_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Up\nC. Down\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Left" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_190_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_200_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_210_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_220_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Down\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Up" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_480_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_490_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_500_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_510_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Left\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Up" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_490_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_500_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_510_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_520_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Up\nC. Down\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Up" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_500_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_510_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_520_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_530_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Down\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Down" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_550_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_560_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_570_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_580_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Down" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_930_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_940_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_950_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_960_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Up\nC. Down\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Down" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_1190_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_1200_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_1210_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_1220_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_6_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_9_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_12_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_15_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_18_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. 
Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_24_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_27_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_30_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_33_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_36_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_46_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_49_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_52_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_55_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_58_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_1416_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_1419_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_1422_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_1425_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_1428_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_1429_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_1432_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_1435_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_1438_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_1441_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_2816_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2819_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2822_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2825_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2828_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_2865_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2868_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2871_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2874_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2877_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_58_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_88_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_118_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_148_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. 
No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_232_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_262_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_292_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_322_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_493_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_523_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_553_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_583_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. 
Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_667_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_697_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_727_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_757_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_928_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_958_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_988_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_1018_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. 
No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_1102_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_1132_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_1162_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_1192_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_772_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_762_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_852_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_828_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_2867_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2821_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_852_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_914_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_1198_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_1124_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_1198_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_1440_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_2206_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2088_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_828_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_772_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_2821_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2529_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_534_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_632_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_762_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_672_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_632_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_672_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_484_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_534_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_93_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_169_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_914_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_956_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_484_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_169_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_1440_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_1489_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_956_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_1124_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_2383_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2276_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_1648_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_1489_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_2009_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_1714_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_2276_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2206_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_2397_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2383_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_1714_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_1648_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_2397_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2480_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_2480_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2529_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_2088_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2009_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_95_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_98_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_101_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_104_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_107_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_125_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_128_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_131_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_134_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_137_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_155_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_158_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_161_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_164_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_167_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_484_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_487_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_490_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_493_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_496_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_503_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_506_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_509_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_512_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_515_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_522_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_525_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_528_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_531_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_534_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_630_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_633_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_636_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_639_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_642_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_646_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_649_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_652_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_655_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_658_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_662_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_665_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_668_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_671_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_674_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_757_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_760_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_763_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_766_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_769_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_761_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_764_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_767_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_770_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_773_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_765_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_768_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_771_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_774_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_777_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_824_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_827_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_830_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_833_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_836_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_834_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_837_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_840_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_843_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_846_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_844_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_847_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_850_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_853_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_856_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_912_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_915_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_918_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_921_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_924_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_929_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_932_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_935_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_938_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_941_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_946_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_949_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_952_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_955_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_958_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_1126_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_1129_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_1132_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_1135_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_1138_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_1155_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_1158_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_1161_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_1164_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_1167_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_1184_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_1187_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_1190_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_1193_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_1196_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_1439_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_1442_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_1445_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_1448_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_1451_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_1458_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_1461_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_1464_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_1467_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_1470_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_1478_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_1481_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_1484_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_1487_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_1490_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_1649_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_1652_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_1655_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_1658_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_1661_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_1675_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_1678_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_1681_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_1684_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_1687_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_1701_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_1704_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_1707_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_1710_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_1713_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_2011_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2014_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2017_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2020_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2023_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_2042_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2045_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2048_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2051_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2054_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_2074_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2077_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2080_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2083_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2086_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_2208_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2211_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2214_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2217_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2220_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_2235_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2238_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2241_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2244_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2247_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_2262_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2265_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2268_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2271_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2274_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_2378_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2381_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2384_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2387_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2390_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_2384_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2387_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2390_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2393_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2396_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_2390_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2393_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2396_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2399_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2402_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_2479_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2482_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2485_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2488_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2491_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_2498_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2501_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2504_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2507_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2510_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_2518_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2521_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2524_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2527_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2530_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_2820_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2823_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2826_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2829_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2832_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_2838_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2841_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2844_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2847_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2850_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_2856_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2859_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2862_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2865_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2868_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_482_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_509_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_536_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_2007_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2048_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2090_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_2504_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2478_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2531_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_2278_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2204_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2241_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_935_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_913_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_957_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_1646_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_1681_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_1716_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_1490_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_1464_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_1439_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_1122_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_1161_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_1200_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_827_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_840_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_853_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_2868_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2844_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_2820_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_673_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_652_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_631_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_0076_user_0014_scene_0009_cfg_0002_91_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_171_036422060215.jpg", + "images/task_0076_user_0014_scene_0009_cfg_0002_131_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0076", + "split": "eval", + "split_group_id": "task_0076_user_0014_scene_0009_cfg_0002", + "recording_id": "task_0076_user_0014_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. contact\nC. pre-approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. pre-approach" + } + ], + "images": [ + "images/task_0078_user_0007_scene_0003_cfg_0002_82_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0078", + "split": "eval", + "split_group_id": "task_0078_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0078_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. hold and carry\nC. release\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
pre-approach" + } + ], + "images": [ + "images/task_0078_user_0007_scene_0003_cfg_0002_118_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0078", + "split": "eval", + "split_group_id": "task_0078_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0078_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. approach\nC. contact\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. contact" + } + ], + "images": [ + "images/task_0078_user_0007_scene_0003_cfg_0002_142_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0078", + "split": "eval", + "split_group_id": "task_0078_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0078_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. release\nC. transfer\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
hold and carry" + } + ], + "images": [ + "images/task_0078_user_0007_scene_0003_cfg_0002_150_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0078", + "split": "eval", + "split_group_id": "task_0078_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0078_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. contact\nC. hold and carry\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. hold and carry" + } + ], + "images": [ + "images/task_0078_user_0007_scene_0003_cfg_0002_168_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0078", + "split": "eval", + "split_group_id": "task_0078_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0078_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. hold and carry\nC. contact\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
transfer" + } + ], + "images": [ + "images/task_0078_user_0007_scene_0003_cfg_0002_170_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0078", + "split": "eval", + "split_group_id": "task_0078_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0078_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. transfer\nC. contact\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. transfer" + } + ], + "images": [ + "images/task_0078_user_0007_scene_0003_cfg_0002_188_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0078", + "split": "eval", + "split_group_id": "task_0078_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0078_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. contact\nC. release\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
release" + } + ], + "images": [ + "images/task_0078_user_0007_scene_0003_cfg_0002_189_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0078", + "split": "eval", + "split_group_id": "task_0078_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0078_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/task_0078_user_0007_scene_0003_cfg_0002_0_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0078", + "split": "eval", + "split_group_id": "task_0078_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0078_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/task_0078_user_0007_scene_0003_cfg_0002_36_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0078", + "split": "eval", + "split_group_id": "task_0078_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0078_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. 
No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_0078_user_0007_scene_0003_cfg_0002_78_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0078", + "split": "eval", + "split_group_id": "task_0078_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0078_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_0078_user_0007_scene_0003_cfg_0002_118_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0078", + "split": "eval", + "split_group_id": "task_0078_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0078_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — contact established" + } + ], + "images": [ + "images/task_0078_user_0007_scene_0003_cfg_0002_142_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0078", + "split": "eval", + "split_group_id": "task_0078_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0078_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/task_0078_user_0007_scene_0003_cfg_0002_160_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0078", + "split": "eval", + "split_group_id": "task_0078_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0078_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — contact established" + } + ], + "images": [ + "images/task_0078_user_0007_scene_0003_cfg_0002_178_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0078", + "split": "eval", + "split_group_id": "task_0078_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0078_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_0078_user_0007_scene_0003_cfg_0002_206_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0078", + "split": "eval", + "split_group_id": "task_0078_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0078_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — no contact" + } + ], + "images": [ + "images/task_0078_user_0007_scene_0003_cfg_0002_242_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0078", + "split": "eval", + "split_group_id": "task_0078_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0078_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Left\nC. Down\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Up" + } + ], + "images": [ + "images/task_0078_user_0007_scene_0003_cfg_0002_40_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_50_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_60_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_70_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0078", + "split": "eval", + "split_group_id": "task_0078_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0078_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Up" + } + ], + "images": [ + "images/task_0078_user_0007_scene_0003_cfg_0002_50_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_60_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_70_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_80_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0078", + "split": "eval", + "split_group_id": "task_0078_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0078_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Right\nC. Left\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Up" + } + ], + "images": [ + "images/task_0078_user_0007_scene_0003_cfg_0002_60_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_70_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_80_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_90_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0078", + "split": "eval", + "split_group_id": "task_0078_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0078_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Left" + } + ], + "images": [ + "images/task_0078_user_0007_scene_0003_cfg_0002_80_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_90_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_100_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_110_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0078", + "split": "eval", + "split_group_id": "task_0078_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0078_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Left\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Left" + } + ], + "images": [ + "images/task_0078_user_0007_scene_0003_cfg_0002_90_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_100_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_110_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_120_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0078", + "split": "eval", + "split_group_id": "task_0078_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0078_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Up\nC. Down\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Left" + } + ], + "images": [ + "images/task_0078_user_0007_scene_0003_cfg_0002_100_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_110_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_120_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_130_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0078", + "split": "eval", + "split_group_id": "task_0078_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0078_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Down\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Down" + } + ], + "images": [ + "images/task_0078_user_0007_scene_0003_cfg_0002_110_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_120_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_130_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_140_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0078", + "split": "eval", + "split_group_id": "task_0078_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0078_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Right\nC. Left\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Down" + } + ], + "images": [ + "images/task_0078_user_0007_scene_0003_cfg_0002_120_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_130_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_140_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_150_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0078", + "split": "eval", + "split_group_id": "task_0078_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0078_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Right\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Down" + } + ], + "images": [ + "images/task_0078_user_0007_scene_0003_cfg_0002_130_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_140_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_150_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_160_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0078", + "split": "eval", + "split_group_id": "task_0078_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0078_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Left\nC. Down\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Right" + } + ], + "images": [ + "images/task_0078_user_0007_scene_0003_cfg_0002_170_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_180_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_190_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_200_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0078", + "split": "eval", + "split_group_id": "task_0078_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0078_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Down\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Right" + } + ], + "images": [ + "images/task_0078_user_0007_scene_0003_cfg_0002_180_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_190_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_200_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_210_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0078", + "split": "eval", + "split_group_id": "task_0078_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0078_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Left\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Right" + } + ], + "images": [ + "images/task_0078_user_0007_scene_0003_cfg_0002_190_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_200_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_210_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_220_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0078", + "split": "eval", + "split_group_id": "task_0078_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0078_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0078_user_0007_scene_0003_cfg_0002_76_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_79_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_82_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_85_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_88_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0078", + "split": "eval", + "split_group_id": "task_0078_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0078_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. 
Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0078_user_0007_scene_0003_cfg_0002_144_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_147_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_150_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_153_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_156_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0078", + "split": "eval", + "split_group_id": "task_0078_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0078_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0078_user_0007_scene_0003_cfg_0002_182_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_185_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_188_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_191_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_194_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0078", + "split": "eval", + "split_group_id": "task_0078_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0078_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0078_user_0007_scene_0003_cfg_0002_5_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_12_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_19_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_26_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0078", + "split": "eval", + "split_group_id": "task_0078_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0078_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. 
Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0078_user_0007_scene_0003_cfg_0002_33_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_40_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_47_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_54_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0078", + "split": "eval", + "split_group_id": "task_0078_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0078_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0078_user_0007_scene_0003_cfg_0002_47_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_54_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_61_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_68_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0078", + "split": "eval", + "split_group_id": "task_0078_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0078_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. 
No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0078_user_0007_scene_0003_cfg_0002_61_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_68_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_75_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_82_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0078", + "split": "eval", + "split_group_id": "task_0078_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0078_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0078_user_0007_scene_0003_cfg_0002_75_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_82_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_89_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_96_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0078", + "split": "eval", + "split_group_id": "task_0078_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0078_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0078_user_0007_scene_0003_cfg_0002_189_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_158_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0078", + "split": "eval", + "split_group_id": "task_0078_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0078_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0078_user_0007_scene_0003_cfg_0002_156_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_159_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_162_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_165_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_168_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0078", + "split": "eval", + "split_group_id": "task_0078_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0078_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0078_user_0007_scene_0003_cfg_0002_168_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_171_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_174_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_177_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_180_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0078", + "split": "eval", + "split_group_id": "task_0078_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0078_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0078_user_0007_scene_0003_cfg_0002_179_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_182_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_185_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_188_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_191_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0078", + "split": "eval", + "split_group_id": "task_0078_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0078_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_0078_user_0007_scene_0003_cfg_0002_190_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_157_036422060215.jpg", + "images/task_0078_user_0007_scene_0003_cfg_0002_174_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0078", + "split": "eval", + "split_group_id": "task_0078_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0078_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0082_user_0007_scene_0008_cfg_0002_25_036422060215.jpg", + "images/task_0082_user_0007_scene_0008_cfg_0002_55_036422060215.jpg", + "images/task_0082_user_0007_scene_0008_cfg_0002_85_036422060215.jpg", + "images/task_0082_user_0007_scene_0008_cfg_0002_115_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0082", + "split": "eval", + "split_group_id": "task_0082_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0082_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0082_user_0007_scene_0008_cfg_0002_101_036422060215.jpg", + "images/task_0082_user_0007_scene_0008_cfg_0002_131_036422060215.jpg", + "images/task_0082_user_0007_scene_0008_cfg_0002_161_036422060215.jpg", + "images/task_0082_user_0007_scene_0008_cfg_0002_191_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0082", + "split": "eval", + "split_group_id": "task_0082_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0082_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0082_user_0007_scene_0008_cfg_0002_177_036422060215.jpg", + "images/task_0082_user_0007_scene_0008_cfg_0002_207_036422060215.jpg", + "images/task_0082_user_0007_scene_0008_cfg_0002_237_036422060215.jpg", + "images/task_0082_user_0007_scene_0008_cfg_0002_267_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0082", + "split": "eval", + "split_group_id": "task_0082_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0082_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0082_user_0007_scene_0008_cfg_0002_291_036422060215.jpg", + "images/task_0082_user_0007_scene_0008_cfg_0002_321_036422060215.jpg", + "images/task_0082_user_0007_scene_0008_cfg_0002_351_036422060215.jpg", + "images/task_0082_user_0007_scene_0008_cfg_0002_381_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0082", + "split": "eval", + "split_group_id": "task_0082_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0082_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0082_user_0007_scene_0008_cfg_0002_367_036422060215.jpg", + "images/task_0082_user_0007_scene_0008_cfg_0002_397_036422060215.jpg", + "images/task_0082_user_0007_scene_0008_cfg_0002_427_036422060215.jpg", + "images/task_0082_user_0007_scene_0008_cfg_0002_457_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0082", + "split": "eval", + "split_group_id": "task_0082_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0082_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0082_user_0007_scene_0008_cfg_0002_443_036422060215.jpg", + "images/task_0082_user_0007_scene_0008_cfg_0002_473_036422060215.jpg", + "images/task_0082_user_0007_scene_0008_cfg_0002_503_036422060215.jpg", + "images/task_0082_user_0007_scene_0008_cfg_0002_533_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0082", + "split": "eval", + "split_group_id": "task_0082_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0082_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_0082_user_0007_scene_0008_cfg_0002_941_036422060215.jpg", + "images/task_0082_user_0007_scene_0008_cfg_0002_912_036422060215.jpg", + "images/task_0082_user_0007_scene_0008_cfg_0002_882_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0082", + "split": "eval", + "split_group_id": "task_0082_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0082_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_0082_user_0007_scene_0008_cfg_0002_742_036422060215.jpg", + "images/task_0082_user_0007_scene_0008_cfg_0002_708_036422060215.jpg", + "images/task_0082_user_0007_scene_0008_cfg_0002_725_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0082", + "split": "eval", + "split_group_id": "task_0082_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0082_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_0082_user_0007_scene_0008_cfg_0002_1010_036422060215.jpg", + "images/task_0082_user_0007_scene_0008_cfg_0002_1018_036422060215.jpg", + "images/task_0082_user_0007_scene_0008_cfg_0002_1026_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0082", + "split": "eval", + "split_group_id": "task_0082_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0082_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_0082_user_0007_scene_0008_cfg_0002_92_036422060215.jpg", + "images/task_0082_user_0007_scene_0008_cfg_0002_105_036422060215.jpg", + "images/task_0082_user_0007_scene_0008_cfg_0002_79_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0082", + "split": "eval", + "split_group_id": "task_0082_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0082_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_0082_user_0007_scene_0008_cfg_0002_538_036422060215.jpg", + "images/task_0082_user_0007_scene_0008_cfg_0002_305_036422060215.jpg", + "images/task_0082_user_0007_scene_0008_cfg_0002_422_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0082", + "split": "eval", + "split_group_id": "task_0082_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0082_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_0082_user_0007_scene_0008_cfg_0002_1162_036422060215.jpg", + "images/task_0082_user_0007_scene_0008_cfg_0002_1207_036422060215.jpg", + "images/task_0082_user_0007_scene_0008_cfg_0002_1118_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0082", + "split": "eval", + "split_group_id": "task_0082_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0082_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0082_user_0007_scene_0009_cfg_0002_23_036422060215.jpg", + "images/task_0082_user_0007_scene_0009_cfg_0002_53_036422060215.jpg", + "images/task_0082_user_0007_scene_0009_cfg_0002_83_036422060215.jpg", + "images/task_0082_user_0007_scene_0009_cfg_0002_113_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0082", + "split": "eval", + "split_group_id": "task_0082_user_0007_scene_0009_cfg_0002", + "recording_id": "task_0082_user_0007_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0082_user_0007_scene_0009_cfg_0002_57_036422060215.jpg", + "images/task_0082_user_0007_scene_0009_cfg_0002_87_036422060215.jpg", + "images/task_0082_user_0007_scene_0009_cfg_0002_117_036422060215.jpg", + "images/task_0082_user_0007_scene_0009_cfg_0002_147_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0082", + "split": "eval", + "split_group_id": "task_0082_user_0007_scene_0009_cfg_0002", + "recording_id": "task_0082_user_0007_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0082_user_0007_scene_0009_cfg_0002_91_036422060215.jpg", + "images/task_0082_user_0007_scene_0009_cfg_0002_121_036422060215.jpg", + "images/task_0082_user_0007_scene_0009_cfg_0002_151_036422060215.jpg", + "images/task_0082_user_0007_scene_0009_cfg_0002_181_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0082", + "split": "eval", + "split_group_id": "task_0082_user_0007_scene_0009_cfg_0002", + "recording_id": "task_0082_user_0007_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0082_user_0007_scene_0009_cfg_0002_125_036422060215.jpg", + "images/task_0082_user_0007_scene_0009_cfg_0002_155_036422060215.jpg", + "images/task_0082_user_0007_scene_0009_cfg_0002_185_036422060215.jpg", + "images/task_0082_user_0007_scene_0009_cfg_0002_215_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0082", + "split": "eval", + "split_group_id": "task_0082_user_0007_scene_0009_cfg_0002", + "recording_id": "task_0082_user_0007_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0082_user_0007_scene_0009_cfg_0002_159_036422060215.jpg", + "images/task_0082_user_0007_scene_0009_cfg_0002_189_036422060215.jpg", + "images/task_0082_user_0007_scene_0009_cfg_0002_219_036422060215.jpg", + "images/task_0082_user_0007_scene_0009_cfg_0002_249_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0082", + "split": "eval", + "split_group_id": "task_0082_user_0007_scene_0009_cfg_0002", + "recording_id": "task_0082_user_0007_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0082_user_0007_scene_0009_cfg_0002_227_036422060215.jpg", + "images/task_0082_user_0007_scene_0009_cfg_0002_257_036422060215.jpg", + "images/task_0082_user_0007_scene_0009_cfg_0002_287_036422060215.jpg", + "images/task_0082_user_0007_scene_0009_cfg_0002_317_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0082", + "split": "eval", + "split_group_id": "task_0082_user_0007_scene_0009_cfg_0002", + "recording_id": "task_0082_user_0007_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0082_user_0007_scene_0009_cfg_0002_261_036422060215.jpg", + "images/task_0082_user_0007_scene_0009_cfg_0002_291_036422060215.jpg", + "images/task_0082_user_0007_scene_0009_cfg_0002_321_036422060215.jpg", + "images/task_0082_user_0007_scene_0009_cfg_0002_351_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0082", + "split": "eval", + "split_group_id": "task_0082_user_0007_scene_0009_cfg_0002", + "recording_id": "task_0082_user_0007_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0082_user_0007_scene_0009_cfg_0002_295_036422060215.jpg", + "images/task_0082_user_0007_scene_0009_cfg_0002_325_036422060215.jpg", + "images/task_0082_user_0007_scene_0009_cfg_0002_355_036422060215.jpg", + "images/task_0082_user_0007_scene_0009_cfg_0002_385_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0082", + "split": "eval", + "split_group_id": "task_0082_user_0007_scene_0009_cfg_0002", + "recording_id": "task_0082_user_0007_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0082_user_0007_scene_0009_cfg_0002_329_036422060215.jpg", + "images/task_0082_user_0007_scene_0009_cfg_0002_359_036422060215.jpg", + "images/task_0082_user_0007_scene_0009_cfg_0002_389_036422060215.jpg", + "images/task_0082_user_0007_scene_0009_cfg_0002_419_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0082", + "split": "eval", + "split_group_id": "task_0082_user_0007_scene_0009_cfg_0002", + "recording_id": "task_0082_user_0007_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0082_user_0007_scene_0009_cfg_0002_363_036422060215.jpg", + "images/task_0082_user_0007_scene_0009_cfg_0002_393_036422060215.jpg", + "images/task_0082_user_0007_scene_0009_cfg_0002_423_036422060215.jpg", + "images/task_0082_user_0007_scene_0009_cfg_0002_453_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0082", + "split": "eval", + "split_group_id": "task_0082_user_0007_scene_0009_cfg_0002", + "recording_id": "task_0082_user_0007_scene_0009_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0083_user_0007_scene_0003_cfg_0002_19_036422060215.jpg", + "images/task_0083_user_0007_scene_0003_cfg_0002_47_036422060215.jpg", + "images/task_0083_user_0007_scene_0003_cfg_0002_75_036422060215.jpg", + "images/task_0083_user_0007_scene_0003_cfg_0002_103_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0083", + "split": "eval", + "split_group_id": "task_0083_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0083_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0083_user_0007_scene_0003_cfg_0002_47_036422060215.jpg", + "images/task_0083_user_0007_scene_0003_cfg_0002_75_036422060215.jpg", + "images/task_0083_user_0007_scene_0003_cfg_0002_103_036422060215.jpg", + "images/task_0083_user_0007_scene_0003_cfg_0002_131_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0083", + "split": "eval", + "split_group_id": "task_0083_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0083_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0083_user_0007_scene_0003_cfg_0002_75_036422060215.jpg", + "images/task_0083_user_0007_scene_0003_cfg_0002_103_036422060215.jpg", + "images/task_0083_user_0007_scene_0003_cfg_0002_131_036422060215.jpg", + "images/task_0083_user_0007_scene_0003_cfg_0002_159_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0083", + "split": "eval", + "split_group_id": "task_0083_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0083_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0083_user_0007_scene_0003_cfg_0002_103_036422060215.jpg", + "images/task_0083_user_0007_scene_0003_cfg_0002_131_036422060215.jpg", + "images/task_0083_user_0007_scene_0003_cfg_0002_159_036422060215.jpg", + "images/task_0083_user_0007_scene_0003_cfg_0002_187_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0083", + "split": "eval", + "split_group_id": "task_0083_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0083_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0083_user_0007_scene_0003_cfg_0002_131_036422060215.jpg", + "images/task_0083_user_0007_scene_0003_cfg_0002_159_036422060215.jpg", + "images/task_0083_user_0007_scene_0003_cfg_0002_187_036422060215.jpg", + "images/task_0083_user_0007_scene_0003_cfg_0002_215_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0083", + "split": "eval", + "split_group_id": "task_0083_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0083_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0083_user_0007_scene_0003_cfg_0002_187_036422060215.jpg", + "images/task_0083_user_0007_scene_0003_cfg_0002_215_036422060215.jpg", + "images/task_0083_user_0007_scene_0003_cfg_0002_243_036422060215.jpg", + "images/task_0083_user_0007_scene_0003_cfg_0002_271_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0083", + "split": "eval", + "split_group_id": "task_0083_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0083_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0083_user_0007_scene_0003_cfg_0002_215_036422060215.jpg", + "images/task_0083_user_0007_scene_0003_cfg_0002_243_036422060215.jpg", + "images/task_0083_user_0007_scene_0003_cfg_0002_271_036422060215.jpg", + "images/task_0083_user_0007_scene_0003_cfg_0002_299_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0083", + "split": "eval", + "split_group_id": "task_0083_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0083_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0083_user_0007_scene_0003_cfg_0002_243_036422060215.jpg", + "images/task_0083_user_0007_scene_0003_cfg_0002_271_036422060215.jpg", + "images/task_0083_user_0007_scene_0003_cfg_0002_299_036422060215.jpg", + "images/task_0083_user_0007_scene_0003_cfg_0002_327_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0083", + "split": "eval", + "split_group_id": "task_0083_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0083_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0083_user_0007_scene_0003_cfg_0002_271_036422060215.jpg", + "images/task_0083_user_0007_scene_0003_cfg_0002_299_036422060215.jpg", + "images/task_0083_user_0007_scene_0003_cfg_0002_327_036422060215.jpg", + "images/task_0083_user_0007_scene_0003_cfg_0002_355_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0083", + "split": "eval", + "split_group_id": "task_0083_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0083_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0083_user_0007_scene_0003_cfg_0002_299_036422060215.jpg", + "images/task_0083_user_0007_scene_0003_cfg_0002_327_036422060215.jpg", + "images/task_0083_user_0007_scene_0003_cfg_0002_355_036422060215.jpg", + "images/task_0083_user_0007_scene_0003_cfg_0002_383_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0083", + "split": "eval", + "split_group_id": "task_0083_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0083_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0083_user_0007_scene_0010_cfg_0002_18_036422060215.jpg", + "images/task_0083_user_0007_scene_0010_cfg_0002_46_036422060215.jpg", + "images/task_0083_user_0007_scene_0010_cfg_0002_74_036422060215.jpg", + "images/task_0083_user_0007_scene_0010_cfg_0002_102_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0083", + "split": "eval", + "split_group_id": "task_0083_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0083_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0083_user_0007_scene_0010_cfg_0002_46_036422060215.jpg", + "images/task_0083_user_0007_scene_0010_cfg_0002_74_036422060215.jpg", + "images/task_0083_user_0007_scene_0010_cfg_0002_102_036422060215.jpg", + "images/task_0083_user_0007_scene_0010_cfg_0002_130_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0083", + "split": "eval", + "split_group_id": "task_0083_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0083_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0083_user_0007_scene_0010_cfg_0002_74_036422060215.jpg", + "images/task_0083_user_0007_scene_0010_cfg_0002_102_036422060215.jpg", + "images/task_0083_user_0007_scene_0010_cfg_0002_130_036422060215.jpg", + "images/task_0083_user_0007_scene_0010_cfg_0002_158_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0083", + "split": "eval", + "split_group_id": "task_0083_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0083_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0083_user_0007_scene_0010_cfg_0002_102_036422060215.jpg", + "images/task_0083_user_0007_scene_0010_cfg_0002_130_036422060215.jpg", + "images/task_0083_user_0007_scene_0010_cfg_0002_158_036422060215.jpg", + "images/task_0083_user_0007_scene_0010_cfg_0002_186_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0083", + "split": "eval", + "split_group_id": "task_0083_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0083_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0083_user_0007_scene_0010_cfg_0002_130_036422060215.jpg", + "images/task_0083_user_0007_scene_0010_cfg_0002_158_036422060215.jpg", + "images/task_0083_user_0007_scene_0010_cfg_0002_186_036422060215.jpg", + "images/task_0083_user_0007_scene_0010_cfg_0002_214_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0083", + "split": "eval", + "split_group_id": "task_0083_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0083_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0083_user_0007_scene_0010_cfg_0002_186_036422060215.jpg", + "images/task_0083_user_0007_scene_0010_cfg_0002_214_036422060215.jpg", + "images/task_0083_user_0007_scene_0010_cfg_0002_242_036422060215.jpg", + "images/task_0083_user_0007_scene_0010_cfg_0002_270_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0083", + "split": "eval", + "split_group_id": "task_0083_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0083_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0083_user_0007_scene_0010_cfg_0002_214_036422060215.jpg", + "images/task_0083_user_0007_scene_0010_cfg_0002_242_036422060215.jpg", + "images/task_0083_user_0007_scene_0010_cfg_0002_270_036422060215.jpg", + "images/task_0083_user_0007_scene_0010_cfg_0002_298_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0083", + "split": "eval", + "split_group_id": "task_0083_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0083_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0083_user_0007_scene_0010_cfg_0002_242_036422060215.jpg", + "images/task_0083_user_0007_scene_0010_cfg_0002_270_036422060215.jpg", + "images/task_0083_user_0007_scene_0010_cfg_0002_298_036422060215.jpg", + "images/task_0083_user_0007_scene_0010_cfg_0002_326_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0083", + "split": "eval", + "split_group_id": "task_0083_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0083_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0083_user_0007_scene_0010_cfg_0002_270_036422060215.jpg", + "images/task_0083_user_0007_scene_0010_cfg_0002_298_036422060215.jpg", + "images/task_0083_user_0007_scene_0010_cfg_0002_326_036422060215.jpg", + "images/task_0083_user_0007_scene_0010_cfg_0002_354_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0083", + "split": "eval", + "split_group_id": "task_0083_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0083_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0083_user_0007_scene_0010_cfg_0002_298_036422060215.jpg", + "images/task_0083_user_0007_scene_0010_cfg_0002_326_036422060215.jpg", + "images/task_0083_user_0007_scene_0010_cfg_0002_354_036422060215.jpg", + "images/task_0083_user_0007_scene_0010_cfg_0002_382_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0083", + "split": "eval", + "split_group_id": "task_0083_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0083_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0084_user_0007_scene_0004_cfg_0002_13_036422060215.jpg", + "images/task_0084_user_0007_scene_0004_cfg_0002_33_036422060215.jpg", + "images/task_0084_user_0007_scene_0004_cfg_0002_53_036422060215.jpg", + "images/task_0084_user_0007_scene_0004_cfg_0002_73_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0084", + "split": "eval", + "split_group_id": "task_0084_user_0007_scene_0004_cfg_0002", + "recording_id": "task_0084_user_0007_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0084_user_0007_scene_0004_cfg_0002_53_036422060215.jpg", + "images/task_0084_user_0007_scene_0004_cfg_0002_73_036422060215.jpg", + "images/task_0084_user_0007_scene_0004_cfg_0002_93_036422060215.jpg", + "images/task_0084_user_0007_scene_0004_cfg_0002_113_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0084", + "split": "eval", + "split_group_id": "task_0084_user_0007_scene_0004_cfg_0002", + "recording_id": "task_0084_user_0007_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0084_user_0007_scene_0004_cfg_0002_93_036422060215.jpg", + "images/task_0084_user_0007_scene_0004_cfg_0002_113_036422060215.jpg", + "images/task_0084_user_0007_scene_0004_cfg_0002_133_036422060215.jpg", + "images/task_0084_user_0007_scene_0004_cfg_0002_153_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0084", + "split": "eval", + "split_group_id": "task_0084_user_0007_scene_0004_cfg_0002", + "recording_id": "task_0084_user_0007_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0084_user_0007_scene_0004_cfg_0002_133_036422060215.jpg", + "images/task_0084_user_0007_scene_0004_cfg_0002_153_036422060215.jpg", + "images/task_0084_user_0007_scene_0004_cfg_0002_173_036422060215.jpg", + "images/task_0084_user_0007_scene_0004_cfg_0002_193_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0084", + "split": "eval", + "split_group_id": "task_0084_user_0007_scene_0004_cfg_0002", + "recording_id": "task_0084_user_0007_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0084_user_0007_scene_0004_cfg_0002_173_036422060215.jpg", + "images/task_0084_user_0007_scene_0004_cfg_0002_193_036422060215.jpg", + "images/task_0084_user_0007_scene_0004_cfg_0002_213_036422060215.jpg", + "images/task_0084_user_0007_scene_0004_cfg_0002_233_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0084", + "split": "eval", + "split_group_id": "task_0084_user_0007_scene_0004_cfg_0002", + "recording_id": "task_0084_user_0007_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0084_user_0007_scene_0004_cfg_0002_213_036422060215.jpg", + "images/task_0084_user_0007_scene_0004_cfg_0002_233_036422060215.jpg", + "images/task_0084_user_0007_scene_0004_cfg_0002_253_036422060215.jpg", + "images/task_0084_user_0007_scene_0004_cfg_0002_273_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0084", + "split": "eval", + "split_group_id": "task_0084_user_0007_scene_0004_cfg_0002", + "recording_id": "task_0084_user_0007_scene_0004_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0084_user_0007_scene_0007_cfg_0002_9_036422060215.jpg", + "images/task_0084_user_0007_scene_0007_cfg_0002_23_036422060215.jpg", + "images/task_0084_user_0007_scene_0007_cfg_0002_37_036422060215.jpg", + "images/task_0084_user_0007_scene_0007_cfg_0002_51_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0084", + "split": "eval", + "split_group_id": "task_0084_user_0007_scene_0007_cfg_0002", + "recording_id": "task_0084_user_0007_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0084_user_0007_scene_0007_cfg_0002_37_036422060215.jpg", + "images/task_0084_user_0007_scene_0007_cfg_0002_51_036422060215.jpg", + "images/task_0084_user_0007_scene_0007_cfg_0002_65_036422060215.jpg", + "images/task_0084_user_0007_scene_0007_cfg_0002_79_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0084", + "split": "eval", + "split_group_id": "task_0084_user_0007_scene_0007_cfg_0002", + "recording_id": "task_0084_user_0007_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0084_user_0007_scene_0007_cfg_0002_65_036422060215.jpg", + "images/task_0084_user_0007_scene_0007_cfg_0002_79_036422060215.jpg", + "images/task_0084_user_0007_scene_0007_cfg_0002_93_036422060215.jpg", + "images/task_0084_user_0007_scene_0007_cfg_0002_107_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0084", + "split": "eval", + "split_group_id": "task_0084_user_0007_scene_0007_cfg_0002", + "recording_id": "task_0084_user_0007_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0084_user_0007_scene_0007_cfg_0002_93_036422060215.jpg", + "images/task_0084_user_0007_scene_0007_cfg_0002_107_036422060215.jpg", + "images/task_0084_user_0007_scene_0007_cfg_0002_121_036422060215.jpg", + "images/task_0084_user_0007_scene_0007_cfg_0002_135_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0084", + "split": "eval", + "split_group_id": "task_0084_user_0007_scene_0007_cfg_0002", + "recording_id": "task_0084_user_0007_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0084_user_0007_scene_0007_cfg_0002_121_036422060215.jpg", + "images/task_0084_user_0007_scene_0007_cfg_0002_135_036422060215.jpg", + "images/task_0084_user_0007_scene_0007_cfg_0002_149_036422060215.jpg", + "images/task_0084_user_0007_scene_0007_cfg_0002_163_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0084", + "split": "eval", + "split_group_id": "task_0084_user_0007_scene_0007_cfg_0002", + "recording_id": "task_0084_user_0007_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0084_user_0007_scene_0007_cfg_0002_149_036422060215.jpg", + "images/task_0084_user_0007_scene_0007_cfg_0002_163_036422060215.jpg", + "images/task_0084_user_0007_scene_0007_cfg_0002_177_036422060215.jpg", + "images/task_0084_user_0007_scene_0007_cfg_0002_191_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0084", + "split": "eval", + "split_group_id": "task_0084_user_0007_scene_0007_cfg_0002", + "recording_id": "task_0084_user_0007_scene_0007_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0084_user_0014_scene_0001_cfg_0002_1_036422060215.jpg", + "images/task_0084_user_0014_scene_0001_cfg_0002_6_036422060215.jpg", + "images/task_0084_user_0014_scene_0001_cfg_0002_11_036422060215.jpg", + "images/task_0084_user_0014_scene_0001_cfg_0002_16_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0084", + "split": "eval", + "split_group_id": "task_0084_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0084_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0084_user_0014_scene_0001_cfg_0002_7_036422060215.jpg", + "images/task_0084_user_0014_scene_0001_cfg_0002_12_036422060215.jpg", + "images/task_0084_user_0014_scene_0001_cfg_0002_17_036422060215.jpg", + "images/task_0084_user_0014_scene_0001_cfg_0002_22_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0084", + "split": "eval", + "split_group_id": "task_0084_user_0014_scene_0001_cfg_0002", + "recording_id": "task_0084_user_0014_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0084_user_0014_scene_0003_cfg_0002_1_036422060215.jpg", + "images/task_0084_user_0014_scene_0003_cfg_0002_6_036422060215.jpg", + "images/task_0084_user_0014_scene_0003_cfg_0002_11_036422060215.jpg", + "images/task_0084_user_0014_scene_0003_cfg_0002_16_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0084", + "split": "eval", + "split_group_id": "task_0084_user_0014_scene_0003_cfg_0002", + "recording_id": "task_0084_user_0014_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0084_user_0014_scene_0003_cfg_0002_7_036422060215.jpg", + "images/task_0084_user_0014_scene_0003_cfg_0002_12_036422060215.jpg", + "images/task_0084_user_0014_scene_0003_cfg_0002_17_036422060215.jpg", + "images/task_0084_user_0014_scene_0003_cfg_0002_22_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0084", + "split": "eval", + "split_group_id": "task_0084_user_0014_scene_0003_cfg_0002", + "recording_id": "task_0084_user_0014_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0085_user_0014_scene_0003_cfg_0002_5_036422060215.jpg", + "images/task_0085_user_0014_scene_0003_cfg_0002_12_036422060215.jpg", + "images/task_0085_user_0014_scene_0003_cfg_0002_19_036422060215.jpg", + "images/task_0085_user_0014_scene_0003_cfg_0002_26_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0085", + "split": "eval", + "split_group_id": "task_0085_user_0014_scene_0003_cfg_0002", + "recording_id": "task_0085_user_0014_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0085_user_0014_scene_0003_cfg_0002_19_036422060215.jpg", + "images/task_0085_user_0014_scene_0003_cfg_0002_26_036422060215.jpg", + "images/task_0085_user_0014_scene_0003_cfg_0002_33_036422060215.jpg", + "images/task_0085_user_0014_scene_0003_cfg_0002_40_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0085", + "split": "eval", + "split_group_id": "task_0085_user_0014_scene_0003_cfg_0002", + "recording_id": "task_0085_user_0014_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0085_user_0014_scene_0003_cfg_0002_33_036422060215.jpg", + "images/task_0085_user_0014_scene_0003_cfg_0002_40_036422060215.jpg", + "images/task_0085_user_0014_scene_0003_cfg_0002_47_036422060215.jpg", + "images/task_0085_user_0014_scene_0003_cfg_0002_54_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0085", + "split": "eval", + "split_group_id": "task_0085_user_0014_scene_0003_cfg_0002", + "recording_id": "task_0085_user_0014_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0085_user_0014_scene_0003_cfg_0002_47_036422060215.jpg", + "images/task_0085_user_0014_scene_0003_cfg_0002_54_036422060215.jpg", + "images/task_0085_user_0014_scene_0003_cfg_0002_61_036422060215.jpg", + "images/task_0085_user_0014_scene_0003_cfg_0002_68_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0085", + "split": "eval", + "split_group_id": "task_0085_user_0014_scene_0003_cfg_0002", + "recording_id": "task_0085_user_0014_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0085_user_0014_scene_0003_cfg_0002_61_036422060215.jpg", + "images/task_0085_user_0014_scene_0003_cfg_0002_68_036422060215.jpg", + "images/task_0085_user_0014_scene_0003_cfg_0002_75_036422060215.jpg", + "images/task_0085_user_0014_scene_0003_cfg_0002_82_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0085", + "split": "eval", + "split_group_id": "task_0085_user_0014_scene_0003_cfg_0002", + "recording_id": "task_0085_user_0014_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0085_user_0014_scene_0003_cfg_0002_75_036422060215.jpg", + "images/task_0085_user_0014_scene_0003_cfg_0002_82_036422060215.jpg", + "images/task_0085_user_0014_scene_0003_cfg_0002_89_036422060215.jpg", + "images/task_0085_user_0014_scene_0003_cfg_0002_96_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0085", + "split": "eval", + "split_group_id": "task_0085_user_0014_scene_0003_cfg_0002", + "recording_id": "task_0085_user_0014_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0085_user_0014_scene_0005_cfg_0002_4_036422060215.jpg", + "images/task_0085_user_0014_scene_0005_cfg_0002_11_036422060215.jpg", + "images/task_0085_user_0014_scene_0005_cfg_0002_18_036422060215.jpg", + "images/task_0085_user_0014_scene_0005_cfg_0002_25_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0085", + "split": "eval", + "split_group_id": "task_0085_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0085_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0085_user_0014_scene_0005_cfg_0002_18_036422060215.jpg", + "images/task_0085_user_0014_scene_0005_cfg_0002_25_036422060215.jpg", + "images/task_0085_user_0014_scene_0005_cfg_0002_32_036422060215.jpg", + "images/task_0085_user_0014_scene_0005_cfg_0002_39_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0085", + "split": "eval", + "split_group_id": "task_0085_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0085_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0085_user_0014_scene_0005_cfg_0002_32_036422060215.jpg", + "images/task_0085_user_0014_scene_0005_cfg_0002_39_036422060215.jpg", + "images/task_0085_user_0014_scene_0005_cfg_0002_46_036422060215.jpg", + "images/task_0085_user_0014_scene_0005_cfg_0002_53_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0085", + "split": "eval", + "split_group_id": "task_0085_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0085_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0085_user_0014_scene_0005_cfg_0002_39_036422060215.jpg", + "images/task_0085_user_0014_scene_0005_cfg_0002_46_036422060215.jpg", + "images/task_0085_user_0014_scene_0005_cfg_0002_53_036422060215.jpg", + "images/task_0085_user_0014_scene_0005_cfg_0002_60_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0085", + "split": "eval", + "split_group_id": "task_0085_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0085_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0085_user_0014_scene_0005_cfg_0002_53_036422060215.jpg", + "images/task_0085_user_0014_scene_0005_cfg_0002_60_036422060215.jpg", + "images/task_0085_user_0014_scene_0005_cfg_0002_67_036422060215.jpg", + "images/task_0085_user_0014_scene_0005_cfg_0002_74_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0085", + "split": "eval", + "split_group_id": "task_0085_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0085_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0085_user_0014_scene_0005_cfg_0002_67_036422060215.jpg", + "images/task_0085_user_0014_scene_0005_cfg_0002_74_036422060215.jpg", + "images/task_0085_user_0014_scene_0005_cfg_0002_81_036422060215.jpg", + "images/task_0085_user_0014_scene_0005_cfg_0002_88_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0085", + "split": "eval", + "split_group_id": "task_0085_user_0014_scene_0005_cfg_0002", + "recording_id": "task_0085_user_0014_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0085_user_0014_scene_0006_cfg_0002_2_036422060215.jpg", + "images/task_0085_user_0014_scene_0006_cfg_0002_7_036422060215.jpg", + "images/task_0085_user_0014_scene_0006_cfg_0002_12_036422060215.jpg", + "images/task_0085_user_0014_scene_0006_cfg_0002_17_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0085", + "split": "eval", + "split_group_id": "task_0085_user_0014_scene_0006_cfg_0002", + "recording_id": "task_0085_user_0014_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0085_user_0014_scene_0006_cfg_0002_8_036422060215.jpg", + "images/task_0085_user_0014_scene_0006_cfg_0002_13_036422060215.jpg", + "images/task_0085_user_0014_scene_0006_cfg_0002_18_036422060215.jpg", + "images/task_0085_user_0014_scene_0006_cfg_0002_23_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0085", + "split": "eval", + "split_group_id": "task_0085_user_0014_scene_0006_cfg_0002", + "recording_id": "task_0085_user_0014_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0085_user_0014_scene_0006_cfg_0002_14_036422060215.jpg", + "images/task_0085_user_0014_scene_0006_cfg_0002_19_036422060215.jpg", + "images/task_0085_user_0014_scene_0006_cfg_0002_24_036422060215.jpg", + "images/task_0085_user_0014_scene_0006_cfg_0002_29_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0085", + "split": "eval", + "split_group_id": "task_0085_user_0014_scene_0006_cfg_0002", + "recording_id": "task_0085_user_0014_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0085_user_0014_scene_0006_cfg_0002_20_036422060215.jpg", + "images/task_0085_user_0014_scene_0006_cfg_0002_25_036422060215.jpg", + "images/task_0085_user_0014_scene_0006_cfg_0002_30_036422060215.jpg", + "images/task_0085_user_0014_scene_0006_cfg_0002_35_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0085", + "split": "eval", + "split_group_id": "task_0085_user_0014_scene_0006_cfg_0002", + "recording_id": "task_0085_user_0014_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0085_user_0014_scene_0006_cfg_0002_26_036422060215.jpg", + "images/task_0085_user_0014_scene_0006_cfg_0002_31_036422060215.jpg", + "images/task_0085_user_0014_scene_0006_cfg_0002_36_036422060215.jpg", + "images/task_0085_user_0014_scene_0006_cfg_0002_41_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0085", + "split": "eval", + "split_group_id": "task_0085_user_0014_scene_0006_cfg_0002", + "recording_id": "task_0085_user_0014_scene_0006_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0001_cfg_0002_15_036422060215.jpg", + "images/task_0091_user_0007_scene_0001_cfg_0002_37_036422060215.jpg", + "images/task_0091_user_0007_scene_0001_cfg_0002_59_036422060215.jpg", + "images/task_0091_user_0007_scene_0001_cfg_0002_81_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0001_cfg_0002_59_036422060215.jpg", + "images/task_0091_user_0007_scene_0001_cfg_0002_81_036422060215.jpg", + "images/task_0091_user_0007_scene_0001_cfg_0002_103_036422060215.jpg", + "images/task_0091_user_0007_scene_0001_cfg_0002_125_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0001_cfg_0002_103_036422060215.jpg", + "images/task_0091_user_0007_scene_0001_cfg_0002_125_036422060215.jpg", + "images/task_0091_user_0007_scene_0001_cfg_0002_147_036422060215.jpg", + "images/task_0091_user_0007_scene_0001_cfg_0002_169_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0001_cfg_0002_147_036422060215.jpg", + "images/task_0091_user_0007_scene_0001_cfg_0002_169_036422060215.jpg", + "images/task_0091_user_0007_scene_0001_cfg_0002_191_036422060215.jpg", + "images/task_0091_user_0007_scene_0001_cfg_0002_213_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0001_cfg_0002_191_036422060215.jpg", + "images/task_0091_user_0007_scene_0001_cfg_0002_213_036422060215.jpg", + "images/task_0091_user_0007_scene_0001_cfg_0002_235_036422060215.jpg", + "images/task_0091_user_0007_scene_0001_cfg_0002_257_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0001_cfg_0002_235_036422060215.jpg", + "images/task_0091_user_0007_scene_0001_cfg_0002_257_036422060215.jpg", + "images/task_0091_user_0007_scene_0001_cfg_0002_279_036422060215.jpg", + "images/task_0091_user_0007_scene_0001_cfg_0002_301_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0001_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0001_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0002_cfg_0002_10_036422060215.jpg", + "images/task_0091_user_0007_scene_0002_cfg_0002_26_036422060215.jpg", + "images/task_0091_user_0007_scene_0002_cfg_0002_42_036422060215.jpg", + "images/task_0091_user_0007_scene_0002_cfg_0002_58_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0002_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0002_cfg_0002_42_036422060215.jpg", + "images/task_0091_user_0007_scene_0002_cfg_0002_58_036422060215.jpg", + "images/task_0091_user_0007_scene_0002_cfg_0002_74_036422060215.jpg", + "images/task_0091_user_0007_scene_0002_cfg_0002_90_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0002_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0002_cfg_0002_74_036422060215.jpg", + "images/task_0091_user_0007_scene_0002_cfg_0002_90_036422060215.jpg", + "images/task_0091_user_0007_scene_0002_cfg_0002_106_036422060215.jpg", + "images/task_0091_user_0007_scene_0002_cfg_0002_122_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0002_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0002_cfg_0002_106_036422060215.jpg", + "images/task_0091_user_0007_scene_0002_cfg_0002_122_036422060215.jpg", + "images/task_0091_user_0007_scene_0002_cfg_0002_138_036422060215.jpg", + "images/task_0091_user_0007_scene_0002_cfg_0002_154_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0002_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0002_cfg_0002_138_036422060215.jpg", + "images/task_0091_user_0007_scene_0002_cfg_0002_154_036422060215.jpg", + "images/task_0091_user_0007_scene_0002_cfg_0002_170_036422060215.jpg", + "images/task_0091_user_0007_scene_0002_cfg_0002_186_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0002_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0002_cfg_0002_170_036422060215.jpg", + "images/task_0091_user_0007_scene_0002_cfg_0002_186_036422060215.jpg", + "images/task_0091_user_0007_scene_0002_cfg_0002_202_036422060215.jpg", + "images/task_0091_user_0007_scene_0002_cfg_0002_218_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0002_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0002_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. pre-approach\nC. hold and carry\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. pre-approach" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0003_cfg_0002_82_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. contact\nC. release\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
pre-approach" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0003_cfg_0002_100_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. transfer\nC. contact\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. pre-approach" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0003_cfg_0002_118_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. release\nC. contact\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
transfer" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0003_cfg_0002_137_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. hold and carry\nC. pre-approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. approach" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0003_cfg_0002_137_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. release\nC. contact\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
contact" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0003_cfg_0002_138_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. contact\nC. pre-approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. release" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0003_cfg_0002_156_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. pre-approach\nC. contact\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
contact" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0003_cfg_0002_213_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. pre-approach\nC. release\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. approach" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0003_cfg_0002_220_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. transfer\nC. release\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
contact" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0003_cfg_0002_231_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. pre-approach\nC. release\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. approach" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0003_cfg_0002_238_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. pre-approach\nC. release\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
hold and carry" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0003_cfg_0002_240_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. release\nC. transfer\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. transfer" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0003_cfg_0002_272_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. hold and carry\nC. approach\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
hold and carry" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0003_cfg_0002_379_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. release\nC. pre-approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. transfer" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0003_cfg_0002_407_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. contact\nC. release\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
hold and carry" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0003_cfg_0002_492_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. release\nC. contact\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. release" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0003_cfg_0002_497_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0003_cfg_0002_82_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. 
Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0003_cfg_0002_100_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0003_cfg_0002_118_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — no contact" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0003_cfg_0002_137_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0003_cfg_0002_138_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — contact established" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0003_cfg_0002_213_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0003_cfg_0002_220_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — contact established" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0003_cfg_0002_231_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0003_cfg_0002_238_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — contact established" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0003_cfg_0002_379_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0003_cfg_0002_492_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Left\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Up" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0003_cfg_0002_30_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_40_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_50_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_60_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Left\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Up" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0003_cfg_0002_40_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_50_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_60_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_70_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Right\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Up" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0003_cfg_0002_50_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_60_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_70_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_80_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Left\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Right" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0003_cfg_0002_240_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_250_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_260_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_270_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Down\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Right" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0003_cfg_0002_250_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_260_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_270_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_280_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Left\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Right" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0003_cfg_0002_260_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_270_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_280_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_290_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Up\nC. Right\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Left" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0003_cfg_0002_510_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_520_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_530_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_540_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Left" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0003_cfg_0002_520_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_530_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_540_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_550_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Left" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0003_cfg_0002_530_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_540_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_550_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_560_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0003_cfg_0002_112_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_115_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_118_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_121_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_124_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. 
Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0003_cfg_0002_266_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_269_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_272_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_275_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_278_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0003_cfg_0002_401_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_404_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_407_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_410_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_413_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0003_cfg_0002_486_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_489_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_492_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_495_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_498_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0003_cfg_0002_12_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_29_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_46_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_63_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. 
No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0003_cfg_0002_46_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_63_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_80_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_97_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0003_cfg_0002_80_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_97_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_114_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_131_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. 
No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0003_cfg_0002_114_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_131_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_148_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_165_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0003_cfg_0002_148_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_165_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_182_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_199_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. 
No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0003_cfg_0002_182_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_199_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_216_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_233_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0003_cfg_0002_145_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_158_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0003_cfg_0002_158_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_290_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0003_cfg_0002_430_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_290_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0003_cfg_0002_140_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_143_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_146_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_149_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_152_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0003_cfg_0002_146_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_149_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_152_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_155_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_158_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0003_cfg_0002_151_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_154_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_157_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_160_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_163_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0003_cfg_0002_298_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_301_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_304_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_307_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_310_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0003_cfg_0002_354_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_357_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_360_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_363_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_366_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0003_cfg_0002_410_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_413_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_416_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_419_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_422_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0003_cfg_0002_285_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_360_036422060215.jpg", + "images/task_0091_user_0007_scene_0003_cfg_0002_435_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0003_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0003_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0005_cfg_0002_25_036422060215.jpg", + "images/task_0091_user_0007_scene_0005_cfg_0002_55_036422060215.jpg", + "images/task_0091_user_0007_scene_0005_cfg_0002_85_036422060215.jpg", + "images/task_0091_user_0007_scene_0005_cfg_0002_115_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0005_cfg_0002_63_036422060215.jpg", + "images/task_0091_user_0007_scene_0005_cfg_0002_93_036422060215.jpg", + "images/task_0091_user_0007_scene_0005_cfg_0002_123_036422060215.jpg", + "images/task_0091_user_0007_scene_0005_cfg_0002_153_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0005_cfg_0002_101_036422060215.jpg", + "images/task_0091_user_0007_scene_0005_cfg_0002_131_036422060215.jpg", + "images/task_0091_user_0007_scene_0005_cfg_0002_161_036422060215.jpg", + "images/task_0091_user_0007_scene_0005_cfg_0002_191_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0005_cfg_0002_139_036422060215.jpg", + "images/task_0091_user_0007_scene_0005_cfg_0002_169_036422060215.jpg", + "images/task_0091_user_0007_scene_0005_cfg_0002_199_036422060215.jpg", + "images/task_0091_user_0007_scene_0005_cfg_0002_229_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0005_cfg_0002_177_036422060215.jpg", + "images/task_0091_user_0007_scene_0005_cfg_0002_207_036422060215.jpg", + "images/task_0091_user_0007_scene_0005_cfg_0002_237_036422060215.jpg", + "images/task_0091_user_0007_scene_0005_cfg_0002_267_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0005_cfg_0002_253_036422060215.jpg", + "images/task_0091_user_0007_scene_0005_cfg_0002_283_036422060215.jpg", + "images/task_0091_user_0007_scene_0005_cfg_0002_313_036422060215.jpg", + "images/task_0091_user_0007_scene_0005_cfg_0002_343_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0005_cfg_0002_291_036422060215.jpg", + "images/task_0091_user_0007_scene_0005_cfg_0002_321_036422060215.jpg", + "images/task_0091_user_0007_scene_0005_cfg_0002_351_036422060215.jpg", + "images/task_0091_user_0007_scene_0005_cfg_0002_381_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0005_cfg_0002_329_036422060215.jpg", + "images/task_0091_user_0007_scene_0005_cfg_0002_359_036422060215.jpg", + "images/task_0091_user_0007_scene_0005_cfg_0002_389_036422060215.jpg", + "images/task_0091_user_0007_scene_0005_cfg_0002_419_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0005_cfg_0002_367_036422060215.jpg", + "images/task_0091_user_0007_scene_0005_cfg_0002_397_036422060215.jpg", + "images/task_0091_user_0007_scene_0005_cfg_0002_427_036422060215.jpg", + "images/task_0091_user_0007_scene_0005_cfg_0002_457_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0091_user_0007_scene_0005_cfg_0002_405_036422060215.jpg", + "images/task_0091_user_0007_scene_0005_cfg_0002_435_036422060215.jpg", + "images/task_0091_user_0007_scene_0005_cfg_0002_465_036422060215.jpg", + "images/task_0091_user_0007_scene_0005_cfg_0002_495_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0091", + "split": "eval", + "split_group_id": "task_0091_user_0007_scene_0005_cfg_0002", + "recording_id": "task_0091_user_0007_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0101_user_0007_scene_0018_cfg_0002_5_036422060215.jpg", + "images/task_0101_user_0007_scene_0018_cfg_0002_13_036422060215.jpg", + "images/task_0101_user_0007_scene_0018_cfg_0002_21_036422060215.jpg", + "images/task_0101_user_0007_scene_0018_cfg_0002_29_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0101", + "split": "eval", + "split_group_id": "task_0101_user_0007_scene_0018_cfg_0002", + "recording_id": "task_0101_user_0007_scene_0018_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0101_user_0007_scene_0018_cfg_0002_13_036422060215.jpg", + "images/task_0101_user_0007_scene_0018_cfg_0002_21_036422060215.jpg", + "images/task_0101_user_0007_scene_0018_cfg_0002_29_036422060215.jpg", + "images/task_0101_user_0007_scene_0018_cfg_0002_37_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0101", + "split": "eval", + "split_group_id": "task_0101_user_0007_scene_0018_cfg_0002", + "recording_id": "task_0101_user_0007_scene_0018_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0101_user_0007_scene_0018_cfg_0002_21_036422060215.jpg", + "images/task_0101_user_0007_scene_0018_cfg_0002_29_036422060215.jpg", + "images/task_0101_user_0007_scene_0018_cfg_0002_37_036422060215.jpg", + "images/task_0101_user_0007_scene_0018_cfg_0002_45_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0101", + "split": "eval", + "split_group_id": "task_0101_user_0007_scene_0018_cfg_0002", + "recording_id": "task_0101_user_0007_scene_0018_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0101_user_0007_scene_0018_cfg_0002_29_036422060215.jpg", + "images/task_0101_user_0007_scene_0018_cfg_0002_37_036422060215.jpg", + "images/task_0101_user_0007_scene_0018_cfg_0002_45_036422060215.jpg", + "images/task_0101_user_0007_scene_0018_cfg_0002_53_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0101", + "split": "eval", + "split_group_id": "task_0101_user_0007_scene_0018_cfg_0002", + "recording_id": "task_0101_user_0007_scene_0018_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0101_user_0007_scene_0018_cfg_0002_37_036422060215.jpg", + "images/task_0101_user_0007_scene_0018_cfg_0002_45_036422060215.jpg", + "images/task_0101_user_0007_scene_0018_cfg_0002_53_036422060215.jpg", + "images/task_0101_user_0007_scene_0018_cfg_0002_61_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0101", + "split": "eval", + "split_group_id": "task_0101_user_0007_scene_0018_cfg_0002", + "recording_id": "task_0101_user_0007_scene_0018_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0101_user_0007_scene_0018_cfg_0002_45_036422060215.jpg", + "images/task_0101_user_0007_scene_0018_cfg_0002_53_036422060215.jpg", + "images/task_0101_user_0007_scene_0018_cfg_0002_61_036422060215.jpg", + "images/task_0101_user_0007_scene_0018_cfg_0002_69_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0101", + "split": "eval", + "split_group_id": "task_0101_user_0007_scene_0018_cfg_0002", + "recording_id": "task_0101_user_0007_scene_0018_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0101_user_0007_scene_0018_cfg_0002_53_036422060215.jpg", + "images/task_0101_user_0007_scene_0018_cfg_0002_61_036422060215.jpg", + "images/task_0101_user_0007_scene_0018_cfg_0002_69_036422060215.jpg", + "images/task_0101_user_0007_scene_0018_cfg_0002_77_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0101", + "split": "eval", + "split_group_id": "task_0101_user_0007_scene_0018_cfg_0002", + "recording_id": "task_0101_user_0007_scene_0018_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0101_user_0007_scene_0018_cfg_0002_61_036422060215.jpg", + "images/task_0101_user_0007_scene_0018_cfg_0002_69_036422060215.jpg", + "images/task_0101_user_0007_scene_0018_cfg_0002_77_036422060215.jpg", + "images/task_0101_user_0007_scene_0018_cfg_0002_85_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0101", + "split": "eval", + "split_group_id": "task_0101_user_0007_scene_0018_cfg_0002", + "recording_id": "task_0101_user_0007_scene_0018_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0101_user_0007_scene_0018_cfg_0002_69_036422060215.jpg", + "images/task_0101_user_0007_scene_0018_cfg_0002_77_036422060215.jpg", + "images/task_0101_user_0007_scene_0018_cfg_0002_85_036422060215.jpg", + "images/task_0101_user_0007_scene_0018_cfg_0002_93_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0101", + "split": "eval", + "split_group_id": "task_0101_user_0007_scene_0018_cfg_0002", + "recording_id": "task_0101_user_0007_scene_0018_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0101_user_0007_scene_0018_cfg_0002_77_036422060215.jpg", + "images/task_0101_user_0007_scene_0018_cfg_0002_85_036422060215.jpg", + "images/task_0101_user_0007_scene_0018_cfg_0002_93_036422060215.jpg", + "images/task_0101_user_0007_scene_0018_cfg_0002_101_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0101", + "split": "eval", + "split_group_id": "task_0101_user_0007_scene_0018_cfg_0002", + "recording_id": "task_0101_user_0007_scene_0018_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. hold and carry\nC. release\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
pre-approach" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0005_cfg_0002_12_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0005_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. pre-approach\nC. approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. pre-approach" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0005_cfg_0002_30_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0005_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. pre-approach\nC. contact\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
pre-approach" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0005_cfg_0002_48_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0005_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. approach\nC. pre-approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. transfer" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0005_cfg_0002_70_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0005_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. approach\nC. hold and carry\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
approach" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0005_cfg_0002_70_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0005_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. transfer\nC. hold and carry\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. release" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0005_cfg_0002_92_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0005_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. release\nC. approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
contact" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0005_cfg_0002_367_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0005_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. hold and carry\nC. transfer\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. hold and carry" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0005_cfg_0002_375_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0005_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. approach\nC. pre-approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
hold and carry" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0005_cfg_0002_393_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0005_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. approach\nC. hold and carry\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. hold and carry" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0005_cfg_0002_411_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0005_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. transfer\nC. contact\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
transfer" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0005_cfg_0002_441_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0005_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. release\nC. hold and carry\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. release" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0005_cfg_0002_472_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0005_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. release\nC. pre-approach\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
approach" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0005_cfg_0002_560_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0005_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. release\nC. contact\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. approach" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0005_cfg_0002_578_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0005_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. pre-approach\nC. approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
transfer" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0005_cfg_0002_578_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0005_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. hold and carry\nC. release\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. contact" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0005_cfg_0002_581_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0005_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. hold and carry\nC. contact\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
release" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0005_cfg_0002_585_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0005_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0005_cfg_0002_12_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0005_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0005_cfg_0002_30_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0005_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. 
No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0005_cfg_0002_48_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0005_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0005_cfg_0002_70_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0005_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — contact established" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0005_cfg_0002_367_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0005_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0005_cfg_0002_393_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0005_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — contact established" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0005_cfg_0002_411_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0005_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0005_cfg_0002_560_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0005_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — no contact" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0005_cfg_0002_578_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0005_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0005_cfg_0002_581_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0005_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Left\nC. Right\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Up" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0005_cfg_0002_50_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_60_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_70_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_80_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0005_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Up\nC. Left\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Up" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0005_cfg_0002_60_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_70_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_80_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_90_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0005_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Left\nC. Down\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Up" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0005_cfg_0002_70_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_80_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_90_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_100_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0005_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Down\nC. Right\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Right" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0005_cfg_0002_170_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_180_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_190_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_200_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0005_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Right" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0005_cfg_0002_180_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_190_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_200_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_210_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0005_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Down\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Right" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0005_cfg_0002_190_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_200_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_210_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_220_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0005_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Right\nC. Left\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Left" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0005_cfg_0002_420_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_430_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_440_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_450_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0005_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Left\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Left" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0005_cfg_0002_430_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_440_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_450_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_460_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0005_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Up\nC. Right\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Left" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0005_cfg_0002_440_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_450_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_460_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_470_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0005_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Left\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Down" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0005_cfg_0002_710_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_720_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_730_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_740_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0005_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Left\nC. Down\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Down" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0005_cfg_0002_720_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_730_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_740_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_750_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0005_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0005_cfg_0002_6_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_9_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_12_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_15_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_18_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0005_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. 
Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0005_cfg_0002_42_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_45_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_48_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_51_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_54_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0005_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0005_cfg_0002_64_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_67_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_70_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_73_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_76_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0005_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0005_cfg_0002_369_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_372_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_375_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_378_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_381_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0005_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0005_cfg_0002_435_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_438_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_441_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_444_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_447_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0005_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0005_cfg_0002_572_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_575_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_578_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_581_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_584_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0005_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0005_cfg_0002_16_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_40_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_64_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_88_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0005_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. 
No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0005_cfg_0002_64_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_88_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_112_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_136_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0005_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0005_cfg_0002_112_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_136_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_160_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_184_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0005_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. 
Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0005_cfg_0002_160_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_184_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_208_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_232_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0005_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0005_cfg_0002_208_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_232_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_256_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_280_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0005_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. 
Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0005_cfg_0002_256_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_280_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_304_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_328_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0005_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0005_cfg_0002_93_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_79_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0005_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0005_cfg_0002_452_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_569_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0005_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0005_cfg_0002_93_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_397_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0005_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0005_cfg_0002_397_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_452_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0005_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0005_cfg_0002_569_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_586_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0005_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0005_cfg_0002_74_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_77_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_80_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_83_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_86_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0005_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0005_cfg_0002_80_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_83_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_86_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_89_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_92_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0005_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0005_cfg_0002_86_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_89_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_92_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_95_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_98_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0005_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0005_cfg_0002_397_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_400_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_403_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_406_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_409_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0005_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0005_cfg_0002_418_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_421_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_424_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_427_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_430_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0005_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0005_cfg_0002_440_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_443_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_446_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_449_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_452_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0005_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0005_cfg_0002_565_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_568_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_571_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_574_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_577_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0005_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0005_cfg_0002_572_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_575_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_578_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_581_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_584_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0005_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0005_cfg_0002_578_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_581_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_584_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_587_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_590_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0005_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0005_cfg_0002_424_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_454_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_395_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0005_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0005_cfg_0002_578_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_586_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_569_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0005_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0005_cfg_0002_78_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_86_036422060215.jpg", + "images/task_0200_user_0013_scene_0005_cfg_0002_94_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0005_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0005_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0008_cfg_0002_8_036422060215.jpg", + "images/task_0200_user_0013_scene_0008_cfg_0002_20_036422060215.jpg", + "images/task_0200_user_0013_scene_0008_cfg_0002_32_036422060215.jpg", + "images/task_0200_user_0013_scene_0008_cfg_0002_44_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0008_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0008_cfg_0002_20_036422060215.jpg", + "images/task_0200_user_0013_scene_0008_cfg_0002_32_036422060215.jpg", + "images/task_0200_user_0013_scene_0008_cfg_0002_44_036422060215.jpg", + "images/task_0200_user_0013_scene_0008_cfg_0002_56_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0008_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0008_cfg_0002_32_036422060215.jpg", + "images/task_0200_user_0013_scene_0008_cfg_0002_44_036422060215.jpg", + "images/task_0200_user_0013_scene_0008_cfg_0002_56_036422060215.jpg", + "images/task_0200_user_0013_scene_0008_cfg_0002_68_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0008_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0008_cfg_0002_44_036422060215.jpg", + "images/task_0200_user_0013_scene_0008_cfg_0002_56_036422060215.jpg", + "images/task_0200_user_0013_scene_0008_cfg_0002_68_036422060215.jpg", + "images/task_0200_user_0013_scene_0008_cfg_0002_80_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0008_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0008_cfg_0002_56_036422060215.jpg", + "images/task_0200_user_0013_scene_0008_cfg_0002_68_036422060215.jpg", + "images/task_0200_user_0013_scene_0008_cfg_0002_80_036422060215.jpg", + "images/task_0200_user_0013_scene_0008_cfg_0002_92_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0008_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0008_cfg_0002_68_036422060215.jpg", + "images/task_0200_user_0013_scene_0008_cfg_0002_80_036422060215.jpg", + "images/task_0200_user_0013_scene_0008_cfg_0002_92_036422060215.jpg", + "images/task_0200_user_0013_scene_0008_cfg_0002_104_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0008_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0008_cfg_0002_80_036422060215.jpg", + "images/task_0200_user_0013_scene_0008_cfg_0002_92_036422060215.jpg", + "images/task_0200_user_0013_scene_0008_cfg_0002_104_036422060215.jpg", + "images/task_0200_user_0013_scene_0008_cfg_0002_116_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0008_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0008_cfg_0002_92_036422060215.jpg", + "images/task_0200_user_0013_scene_0008_cfg_0002_104_036422060215.jpg", + "images/task_0200_user_0013_scene_0008_cfg_0002_116_036422060215.jpg", + "images/task_0200_user_0013_scene_0008_cfg_0002_128_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0008_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0008_cfg_0002_104_036422060215.jpg", + "images/task_0200_user_0013_scene_0008_cfg_0002_116_036422060215.jpg", + "images/task_0200_user_0013_scene_0008_cfg_0002_128_036422060215.jpg", + "images/task_0200_user_0013_scene_0008_cfg_0002_140_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0008_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0200_user_0013_scene_0008_cfg_0002_116_036422060215.jpg", + "images/task_0200_user_0013_scene_0008_cfg_0002_128_036422060215.jpg", + "images/task_0200_user_0013_scene_0008_cfg_0002_140_036422060215.jpg", + "images/task_0200_user_0013_scene_0008_cfg_0002_152_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0200", + "split": "eval", + "split_group_id": "task_0200_user_0013_scene_0008_cfg_0002", + "recording_id": "task_0200_user_0013_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0202_user_0013_scene_0010_cfg_0002_15_036422060215.jpg", + "images/task_0202_user_0013_scene_0010_cfg_0002_37_036422060215.jpg", + "images/task_0202_user_0013_scene_0010_cfg_0002_59_036422060215.jpg", + "images/task_0202_user_0013_scene_0010_cfg_0002_81_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0202", + "split": "eval", + "split_group_id": "task_0202_user_0013_scene_0010_cfg_0002", + "recording_id": "task_0202_user_0013_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0202_user_0013_scene_0010_cfg_0002_37_036422060215.jpg", + "images/task_0202_user_0013_scene_0010_cfg_0002_59_036422060215.jpg", + "images/task_0202_user_0013_scene_0010_cfg_0002_81_036422060215.jpg", + "images/task_0202_user_0013_scene_0010_cfg_0002_103_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0202", + "split": "eval", + "split_group_id": "task_0202_user_0013_scene_0010_cfg_0002", + "recording_id": "task_0202_user_0013_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0202_user_0013_scene_0010_cfg_0002_59_036422060215.jpg", + "images/task_0202_user_0013_scene_0010_cfg_0002_81_036422060215.jpg", + "images/task_0202_user_0013_scene_0010_cfg_0002_103_036422060215.jpg", + "images/task_0202_user_0013_scene_0010_cfg_0002_125_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0202", + "split": "eval", + "split_group_id": "task_0202_user_0013_scene_0010_cfg_0002", + "recording_id": "task_0202_user_0013_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this episode will fail" + } + ], + "images": [ + "images/task_0202_user_0013_scene_0010_cfg_0002_81_036422060215.jpg", + "images/task_0202_user_0013_scene_0010_cfg_0002_103_036422060215.jpg", + "images/task_0202_user_0013_scene_0010_cfg_0002_125_036422060215.jpg", + "images/task_0202_user_0013_scene_0010_cfg_0002_147_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0202", + "split": "eval", + "split_group_id": "task_0202_user_0013_scene_0010_cfg_0002", + "recording_id": "task_0202_user_0013_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0202_user_0013_scene_0010_cfg_0002_103_036422060215.jpg", + "images/task_0202_user_0013_scene_0010_cfg_0002_125_036422060215.jpg", + "images/task_0202_user_0013_scene_0010_cfg_0002_147_036422060215.jpg", + "images/task_0202_user_0013_scene_0010_cfg_0002_169_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0202", + "split": "eval", + "split_group_id": "task_0202_user_0013_scene_0010_cfg_0002", + "recording_id": "task_0202_user_0013_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0202_user_0013_scene_0010_cfg_0002_147_036422060215.jpg", + "images/task_0202_user_0013_scene_0010_cfg_0002_169_036422060215.jpg", + "images/task_0202_user_0013_scene_0010_cfg_0002_191_036422060215.jpg", + "images/task_0202_user_0013_scene_0010_cfg_0002_213_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0202", + "split": "eval", + "split_group_id": "task_0202_user_0013_scene_0010_cfg_0002", + "recording_id": "task_0202_user_0013_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0202_user_0013_scene_0010_cfg_0002_169_036422060215.jpg", + "images/task_0202_user_0013_scene_0010_cfg_0002_191_036422060215.jpg", + "images/task_0202_user_0013_scene_0010_cfg_0002_213_036422060215.jpg", + "images/task_0202_user_0013_scene_0010_cfg_0002_235_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0202", + "split": "eval", + "split_group_id": "task_0202_user_0013_scene_0010_cfg_0002", + "recording_id": "task_0202_user_0013_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0202_user_0013_scene_0010_cfg_0002_191_036422060215.jpg", + "images/task_0202_user_0013_scene_0010_cfg_0002_213_036422060215.jpg", + "images/task_0202_user_0013_scene_0010_cfg_0002_235_036422060215.jpg", + "images/task_0202_user_0013_scene_0010_cfg_0002_257_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0202", + "split": "eval", + "split_group_id": "task_0202_user_0013_scene_0010_cfg_0002", + "recording_id": "task_0202_user_0013_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — this episode will fail" + } + ], + "images": [ + "images/task_0202_user_0013_scene_0010_cfg_0002_213_036422060215.jpg", + "images/task_0202_user_0013_scene_0010_cfg_0002_235_036422060215.jpg", + "images/task_0202_user_0013_scene_0010_cfg_0002_257_036422060215.jpg", + "images/task_0202_user_0013_scene_0010_cfg_0002_279_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0202", + "split": "eval", + "split_group_id": "task_0202_user_0013_scene_0010_cfg_0002", + "recording_id": "task_0202_user_0013_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this episode will fail" + } + ], + "images": [ + "images/task_0202_user_0013_scene_0010_cfg_0002_235_036422060215.jpg", + "images/task_0202_user_0013_scene_0010_cfg_0002_257_036422060215.jpg", + "images/task_0202_user_0013_scene_0010_cfg_0002_279_036422060215.jpg", + "images/task_0202_user_0013_scene_0010_cfg_0002_301_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0202", + "split": "eval", + "split_group_id": "task_0202_user_0013_scene_0010_cfg_0002", + "recording_id": "task_0202_user_0013_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. release\nC. approach\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
pre-approach" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_55_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. pre-approach\nC. transfer\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. pre-approach" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_73_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. contact\nC. hold and carry\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
pre-approach" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_91_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. contact\nC. release\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. transfer" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_115_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. release\nC. hold and carry\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
approach" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_115_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. hold and carry\nC. pre-approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. release" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_131_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. release\nC. contact\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
contact" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_250_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. contact\nC. release\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. hold and carry" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_258_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. contact\nC. hold and carry\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
approach" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_404_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. release\nC. approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. transfer" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_590_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. contact\nC. release\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
release" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_600_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. contact\nC. transfer\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. contact" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_717_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. approach\nC. contact\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
hold and carry" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_725_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. transfer\nC. pre-approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. contact" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_1188_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. pre-approach\nC. hold and carry\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
approach" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_1204_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. contact\nC. hold and carry\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. hold and carry" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_1317_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. pre-approach\nC. approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
release" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_1347_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. hold and carry\nC. contact\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. transfer" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_1349_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — no contact" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_55_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_73_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_91_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. 
No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_115_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_250_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — no contact" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_404_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_717_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — contact established" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_1188_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_1204_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — contact established" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_1317_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Left\nC. Down\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Up" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_90_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_100_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_110_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_120_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Left\nC. Down\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Up" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_100_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_110_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_120_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_130_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Down\nC. Left\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Up" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_110_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_120_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_130_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_140_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Up\nC. Left\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Left" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_170_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_180_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_190_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_200_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Up\nC. Right\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Left" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_180_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_190_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_200_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_210_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Right\nC. Down\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Right" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_270_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_280_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_290_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_300_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Up\nC. Down\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Right" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_280_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_290_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_300_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_310_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Left\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Right" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_290_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_300_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_310_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_320_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Right\nC. Left\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Down" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_310_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_320_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_330_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_340_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Up\nC. Down\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Left" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_390_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_400_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_410_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_420_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Left\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Down" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_650_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_660_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_670_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_680_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Left\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Down" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_830_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_840_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_850_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_860_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_49_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_52_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_55_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_58_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_61_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. 
Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_85_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_88_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_91_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_94_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_97_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_109_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_112_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_115_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_118_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_121_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_252_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_255_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_258_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_261_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_264_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_1343_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_1346_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_1349_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_1352_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_1355_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_28_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_58_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_88_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_118_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. 
No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_110_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_140_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_170_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_200_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_192_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_222_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_252_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_282_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. 
No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_315_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_345_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_375_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_405_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_397_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_427_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_457_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_487_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. 
Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_479_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_509_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_539_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_569_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_1097_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_1120_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_1024_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_884_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_868_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_884_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_174_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_191_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_527_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_448_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_306_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_270_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_306_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_411_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_748_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_805_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_1120_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_1232_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_581_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_748_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_868_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_805_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_133_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_174_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_133_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_123_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_1097_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_1035_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_448_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_411_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_1232_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_1311_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_191_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_270_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_1024_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_1035_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_581_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_527_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_118_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_121_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_124_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_127_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_130_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_122_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_125_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_128_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_131_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_134_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_126_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_129_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_132_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_135_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_138_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_170_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_173_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_176_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_179_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_182_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_176_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_179_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_182_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_185_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_188_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_183_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_186_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_189_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_192_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_195_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_268_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_271_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_274_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_277_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_280_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_282_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_285_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_288_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_291_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_294_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_296_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_299_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_302_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_305_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_308_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_409_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_412_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_415_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_418_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_421_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_424_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_427_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_430_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_433_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_436_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_438_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_441_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_444_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_447_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_450_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_527_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_530_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_533_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_536_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_539_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_548_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_551_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_554_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_557_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_560_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_569_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_572_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_575_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_578_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_581_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_748_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_751_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_754_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_757_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_760_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_770_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_773_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_776_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_779_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_782_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_793_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_796_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_799_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_802_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_805_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_864_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_867_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_870_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_873_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_876_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_870_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_873_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_876_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_879_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_882_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_876_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_879_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_882_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_885_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_888_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_1019_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_1022_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_1025_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_1028_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_1031_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_1024_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_1027_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_1030_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_1033_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_1036_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_1028_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_1031_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_1034_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_1037_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_1040_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_1094_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_1097_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_1100_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_1103_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_1106_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_1102_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_1105_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_1108_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_1111_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_1114_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_1111_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_1114_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_1117_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_1120_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_1123_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_1234_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_1237_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_1240_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_1243_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_1246_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_1266_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_1269_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_1272_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_1275_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_1278_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_1297_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_1300_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_1303_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_1306_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_1309_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_450_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_430_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_409_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_191_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_182_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_174_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_1229_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_1314_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_1272_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_1108_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_1097_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_1120_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_876_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_884_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_868_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_583_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_554_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_525_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_776_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_807_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_746_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_0215_user_0007_scene_0008_cfg_0002_269_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_288_036422060215.jpg", + "images/task_0215_user_0007_scene_0008_cfg_0002_307_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0215", + "split": "eval", + "split_group_id": "task_0215_user_0007_scene_0008_cfg_0002", + "recording_id": "task_0215_user_0007_scene_0008_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. approach\nC. pre-approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. pre-approach" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_85_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. hold and carry\nC. contact\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
pre-approach" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_103_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. pre-approach\nC. contact\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. pre-approach" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_121_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. transfer\nC. approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
approach" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_145_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. approach\nC. release\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. transfer" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_145_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. transfer\nC. contact\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
contact" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_205_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. release\nC. hold and carry\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. hold and carry" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_213_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. transfer\nC. release\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
release" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_267_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. release\nC. contact\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. contact" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_482_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. pre-approach\nC. hold and carry\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
hold and carry" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_546_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. release\nC. hold and carry\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. approach" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_726_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. pre-approach\nC. approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
hold and carry" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_776_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. contact\nC. release\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. release" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_782_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. contact\nC. release\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
transfer" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_849_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. approach\nC. pre-approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. approach" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_1031_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. release\nC. transfer\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
contact" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_1036_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. transfer\nC. contact\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. transfer" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_1048_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. transfer\nC. hold and carry\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
release" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_1049_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T1", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_85_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_103_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. 
No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_121_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_145_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — contact established" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_205_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_482_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — contact established" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_546_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_726_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — contact established" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_776_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_1031_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — contact established" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_1036_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T2", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Down\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Right" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_140_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_150_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_160_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_170_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Up\nC. Down\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Right" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_150_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_160_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_170_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_180_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Down\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Right" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_160_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_170_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_180_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_190_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Left\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Down" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_310_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_320_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_330_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_340_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Right\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Down" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_320_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_330_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_340_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_350_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Right\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Up" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_840_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_850_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_860_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_870_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Left\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Up" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_850_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_860_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_870_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_880_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Down\nC. Right\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Up" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_860_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_870_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_880_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_890_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Left\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Left" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_900_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_910_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_920_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_930_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Left\nC. Down\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Left" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_910_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_920_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_930_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_940_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Down\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Left" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_920_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_930_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_940_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_950_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T3", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_79_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_82_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_85_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_88_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_91_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_115_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_118_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_121_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_124_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_127_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_139_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_142_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_145_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_148_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_151_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_207_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_210_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_213_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_216_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_219_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_770_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_773_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_776_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_779_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_782_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_843_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_846_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_849_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_852_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_855_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_1042_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_1045_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_1048_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_1051_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_1054_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T6", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_21_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_51_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_81_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_111_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. 
Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_85_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_115_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_145_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_175_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_149_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_179_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_209_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_239_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. No — this episode will fail\nB. 
Yes — this episode will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_213_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_243_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_273_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_303_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_277_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_307_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_337_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_367_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the episode, will this manipulation attempt eventually succeed?\nOptions:\nA. 
Yes — this episode will succeed\nB. No — this episode will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this episode will succeed" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_341_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_371_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_401_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_431_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T7", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_858_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_872_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_946_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_935_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_243_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_179_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_998_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_1038_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_518_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_583_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_946_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_998_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_674_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_734_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_771_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_734_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_872_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_935_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_430_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_396_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_674_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_653_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_583_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_653_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_396_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_243_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. 
The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_430_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_518_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation scene. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_858_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_771_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_binary", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_180_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_183_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_186_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_189_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_192_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_205_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_208_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_211_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_214_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_217_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_230_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_233_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_236_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_239_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_242_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_394_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_397_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_400_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_403_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_406_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_407_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_410_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_413_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_416_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_419_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_420_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_423_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_426_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_429_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_432_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_519_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_522_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_525_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_528_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_531_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_544_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_547_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_550_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_553_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_556_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_570_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_573_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_576_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_579_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_582_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_649_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_652_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_655_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_658_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_661_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_658_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_661_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_664_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_667_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_670_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_666_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_669_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_672_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_675_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_678_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_732_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_735_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_738_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_741_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_744_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_746_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_749_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_752_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_755_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_758_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_761_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_764_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_767_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_770_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_773_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_853_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_856_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_859_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_862_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_865_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_859_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_862_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_865_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_868_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_871_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_865_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_868_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_871_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_874_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_877_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_930_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_933_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_936_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_939_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_942_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_934_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_937_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_940_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_943_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_946_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_939_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_942_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_945_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_948_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_951_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (the current local step has just started)" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_996_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_999_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_1002_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_1005_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_1008_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (the current local step is underway)" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_1012_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_1015_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_1018_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_1021_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_1024_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (the current local step has just started)\nB. Middle stage (the current local step is underway)\nC. Late stage (the current local step is nearly complete)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (the current local step is nearly complete)" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_1028_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_1031_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_1034_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_1037_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_1040_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_progress", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_177_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_245_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_211_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_857_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_865_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_873_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_431_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_413_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_395_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_1039_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_997_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_1018_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_516_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_550_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_585_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_732_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_752_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_773_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_0216_user_0007_scene_0010_cfg_0002_675_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_652_036422060215.jpg", + "images/task_0216_user_0007_scene_0010_cfg_0002_664_036422060215.jpg" + ], + "dataset": "RH20T", + "source_dataset": "RH20T", + "task_type": "T_temporal", + "task_id": "task_0216", + "split": "eval", + "split_group_id": "task_0216_user_0007_scene_0010_cfg_0002", + "recording_id": "task_0216_user_0007_scene_0010_cfg_0002", + "episode_id": null, + "camera": "036422060215", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. approach\nC. hold and carry\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
pre-approach" + } + ], + "images": [ + "images/2025-01-10-15-39-56_36_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-10-15-39-56", + "split": "eval", + "split_group_id": "2025-01-10-15-39-56", + "recording_id": "2025-01-10-15-39-56", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. contact\nC. release\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. contact" + } + ], + "images": [ + "images/2025-01-10-15-39-56_169_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-10-15-39-56", + "split": "eval", + "split_group_id": "2025-01-10-15-39-56", + "recording_id": "2025-01-10-15-39-56", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. approach\nC. release\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. contact" + } + ], + "images": [ + "images/2025-01-10-15-39-56_365_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-10-15-39-56", + "split": "eval", + "split_group_id": "2025-01-10-15-39-56", + "recording_id": "2025-01-10-15-39-56", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. release\nC. transfer\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
transfer" + } + ], + "images": [ + "images/2025-01-10-15-39-56_545_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-10-15-39-56", + "split": "eval", + "split_group_id": "2025-01-10-15-39-56", + "recording_id": "2025-01-10-15-39-56", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. approach\nC. pre-approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. release" + } + ], + "images": [ + "images/2025-01-10-15-39-56_656_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-10-15-39-56", + "split": "eval", + "split_group_id": "2025-01-10-15-39-56", + "recording_id": "2025-01-10-15-39-56", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/2025-01-10-15-39-56_36_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-10-15-39-56", + "split": "eval", + "split_group_id": "2025-01-10-15-39-56", + "recording_id": "2025-01-10-15-39-56", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — contact established" + } + ], + "images": [ + "images/2025-01-10-15-39-56_169_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-10-15-39-56", + "split": "eval", + "split_group_id": "2025-01-10-15-39-56", + "recording_id": "2025-01-10-15-39-56", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-10-15-39-56_365_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-10-15-39-56", + "split": "eval", + "split_group_id": "2025-01-10-15-39-56", + "recording_id": "2025-01-10-15-39-56", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/2025-01-10-15-39-56_545_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-10-15-39-56", + "split": "eval", + "split_group_id": "2025-01-10-15-39-56", + "recording_id": "2025-01-10-15-39-56", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — contact established" + } + ], + "images": [ + "images/2025-01-10-15-39-56_656_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-10-15-39-56", + "split": "eval", + "split_group_id": "2025-01-10-15-39-56", + "recording_id": "2025-01-10-15-39-56", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-10-15-39-56_14_hand.jpg", + "images/2025-01-10-15-39-56_25_hand.jpg", + "images/2025-01-10-15-39-56_36_hand.jpg", + "images/2025-01-10-15-39-56_48_hand.jpg", + "images/2025-01-10-15-39-56_59_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-10-15-39-56", + "split": "eval", + "split_group_id": "2025-01-10-15-39-56", + "recording_id": "2025-01-10-15-39-56", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-10-15-39-56_304_hand.jpg", + "images/2025-01-10-15-39-56_335_hand.jpg", + "images/2025-01-10-15-39-56_366_hand.jpg", + "images/2025-01-10-15-39-56_396_hand.jpg", + "images/2025-01-10-15-39-56_427_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-10-15-39-56", + "split": "eval", + "split_group_id": "2025-01-10-15-39-56", + "recording_id": "2025-01-10-15-39-56", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-10-15-39-56_495_hand.jpg", + "images/2025-01-10-15-39-56_520_hand.jpg", + "images/2025-01-10-15-39-56_545_hand.jpg", + "images/2025-01-10-15-39-56_570_hand.jpg", + "images/2025-01-10-15-39-56_595_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-10-15-39-56", + "split": "eval", + "split_group_id": "2025-01-10-15-39-56", + "recording_id": "2025-01-10-15-39-56", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-10-15-39-56_636_hand.jpg", + "images/2025-01-10-15-39-56_646_hand.jpg", + "images/2025-01-10-15-39-56_656_hand.jpg", + "images/2025-01-10-15-39-56_666_hand.jpg", + "images/2025-01-10-15-39-56_676_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-10-15-39-56", + "split": "eval", + "split_group_id": "2025-01-10-15-39-56", + "recording_id": "2025-01-10-15-39-56", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-10-15-39-56_122_hand.jpg", + "images/2025-01-10-15-39-56_151_hand.jpg", + "images/2025-01-10-15-39-56_180_hand.jpg", + "images/2025-01-10-15-39-56_210_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-10-15-39-56", + "split": "eval", + "split_group_id": "2025-01-10-15-39-56", + "recording_id": "2025-01-10-15-39-56", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-10-15-39-56_494_hand.jpg", + "images/2025-01-10-15-39-56_510_hand.jpg", + "images/2025-01-10-15-39-56_527_hand.jpg", + "images/2025-01-10-15-39-56_543_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-10-15-39-56", + "split": "eval", + "split_group_id": "2025-01-10-15-39-56", + "recording_id": "2025-01-10-15-39-56", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-10-15-39-56_576_hand.jpg", + "images/2025-01-10-15-39-56_268_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-10-15-39-56", + "split": "eval", + "split_group_id": "2025-01-10-15-39-56", + "recording_id": "2025-01-10-15-39-56", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-10-15-39-56_194_hand.jpg", + "images/2025-01-10-15-39-56_197_hand.jpg", + "images/2025-01-10-15-39-56_200_hand.jpg", + "images/2025-01-10-15-39-56_203_hand.jpg", + "images/2025-01-10-15-39-56_206_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-10-15-39-56", + "split": "eval", + "split_group_id": "2025-01-10-15-39-56", + "recording_id": "2025-01-10-15-39-56", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-10-15-39-56_262_hand.jpg", + "images/2025-01-10-15-39-56_265_hand.jpg", + "images/2025-01-10-15-39-56_268_hand.jpg", + "images/2025-01-10-15-39-56_271_hand.jpg", + "images/2025-01-10-15-39-56_274_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-10-15-39-56", + "split": "eval", + "split_group_id": "2025-01-10-15-39-56", + "recording_id": "2025-01-10-15-39-56", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-10-15-39-56_331_hand.jpg", + "images/2025-01-10-15-39-56_334_hand.jpg", + "images/2025-01-10-15-39-56_337_hand.jpg", + "images/2025-01-10-15-39-56_340_hand.jpg", + "images/2025-01-10-15-39-56_343_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-10-15-39-56", + "split": "eval", + "split_group_id": "2025-01-10-15-39-56", + "recording_id": "2025-01-10-15-39-56", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-10-15-39-56_532_hand.jpg", + "images/2025-01-10-15-39-56_535_hand.jpg", + "images/2025-01-10-15-39-56_538_hand.jpg", + "images/2025-01-10-15-39-56_541_hand.jpg", + "images/2025-01-10-15-39-56_544_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-10-15-39-56", + "split": "eval", + "split_group_id": "2025-01-10-15-39-56", + "recording_id": "2025-01-10-15-39-56", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-10-15-39-56_570_hand.jpg", + "images/2025-01-10-15-39-56_573_hand.jpg", + "images/2025-01-10-15-39-56_576_hand.jpg", + "images/2025-01-10-15-39-56_579_hand.jpg", + "images/2025-01-10-15-39-56_582_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-10-15-39-56", + "split": "eval", + "split_group_id": "2025-01-10-15-39-56", + "recording_id": "2025-01-10-15-39-56", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-10-15-39-56_609_hand.jpg", + "images/2025-01-10-15-39-56_612_hand.jpg", + "images/2025-01-10-15-39-56_615_hand.jpg", + "images/2025-01-10-15-39-56_618_hand.jpg", + "images/2025-01-10-15-39-56_621_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-10-15-39-56", + "split": "eval", + "split_group_id": "2025-01-10-15-39-56", + "recording_id": "2025-01-10-15-39-56", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Pull\nB. Grasp\nC. Nudge\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Grasp" + } + ], + "images": [ + "images/2025-01-10-15-39-56_164_hand.jpg", + "images/2025-01-10-15-39-56_167_hand.jpg", + "images/2025-01-10-15-39-56_170_hand.jpg", + "images/2025-01-10-15-39-56_173_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-10-15-39-56", + "split": "eval", + "split_group_id": "2025-01-10-15-39-56", + "recording_id": "2025-01-10-15-39-56", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Approach\nB. Release\nC. Align\nD. Pull\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Pull" + } + ], + "images": [ + "images/2025-01-10-15-39-56_360_hand.jpg", + "images/2025-01-10-15-39-56_363_hand.jpg", + "images/2025-01-10-15-39-56_366_hand.jpg", + "images/2025-01-10-15-39-56_369_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-10-15-39-56", + "split": "eval", + "split_group_id": "2025-01-10-15-39-56", + "recording_id": "2025-01-10-15-39-56", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Release\nB. Twist\nC. Pull\nD. Approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Approach" + } + ], + "images": [ + "images/2025-01-10-15-39-56_539_hand.jpg", + "images/2025-01-10-15-39-56_542_hand.jpg", + "images/2025-01-10-15-39-56_545_hand.jpg", + "images/2025-01-10-15-39-56_548_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-10-15-39-56", + "split": "eval", + "split_group_id": "2025-01-10-15-39-56", + "recording_id": "2025-01-10-15-39-56", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Push\nB. Release\nC. Lift\nD. Twist\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Release" + } + ], + "images": [ + "images/2025-01-10-15-39-56_650_hand.jpg", + "images/2025-01-10-15-39-56_653_hand.jpg", + "images/2025-01-10-15-39-56_656_hand.jpg", + "images/2025-01-10-15-39-56_659_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-10-15-39-56", + "split": "eval", + "split_group_id": "2025-01-10-15-39-56", + "recording_id": "2025-01-10-15-39-56", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Remove waterproof', what low-level action should happen next?\nOptions:\nA. Push\nB. Twist\nC. Pull\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Pull" + } + ], + "images": [ + "images/2025-01-10-15-39-56_197_hand.jpg", + "images/2025-01-10-15-39-56_200_hand.jpg", + "images/2025-01-10-15-39-56_203_hand.jpg", + "images/2025-01-10-15-39-56_206_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-10-15-39-56", + "split": "eval", + "split_group_id": "2025-01-10-15-39-56", + "recording_id": "2025-01-10-15-39-56", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Place waterproof', what low-level action should happen next?\nOptions:\nA. Twist\nB. Push\nC. Grasp\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Release" + } + ], + "images": [ + "images/2025-01-10-15-39-56_566_hand.jpg", + "images/2025-01-10-15-39-56_569_hand.jpg", + "images/2025-01-10-15-39-56_572_hand.jpg", + "images/2025-01-10-15-39-56_575_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-10-15-39-56", + "split": "eval", + "split_group_id": "2025-01-10-15-39-56", + "recording_id": "2025-01-10-15-39-56", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. contact\nC. hold and carry\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. pre-approach" + } + ], + "images": [ + "images/2025-01-11-14-43-37_29_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-11-14-43-37", + "split": "eval", + "split_group_id": "2025-01-11-14-43-37", + "recording_id": "2025-01-11-14-43-37", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. hold and carry\nC. contact\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. contact" + } + ], + "images": [ + "images/2025-01-11-14-43-37_241_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-11-14-43-37", + "split": "eval", + "split_group_id": "2025-01-11-14-43-37", + "recording_id": "2025-01-11-14-43-37", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. 
transfer\nB. contact\nC. approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. hold and carry" + } + ], + "images": [ + "images/2025-01-11-14-43-37_438_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-11-14-43-37", + "split": "eval", + "split_group_id": "2025-01-11-14-43-37", + "recording_id": "2025-01-11-14-43-37", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. release\nC. pre-approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. pre-approach" + } + ], + "images": [ + "images/2025-01-11-14-43-37_536_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-11-14-43-37", + "split": "eval", + "split_group_id": "2025-01-11-14-43-37", + "recording_id": "2025-01-11-14-43-37", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. pre-approach\nC. hold and carry\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. pre-approach" + } + ], + "images": [ + "images/2025-01-11-14-43-37_629_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-11-14-43-37", + "split": "eval", + "split_group_id": "2025-01-11-14-43-37", + "recording_id": "2025-01-11-14-43-37", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. 
Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/2025-01-11-14-43-37_29_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-11-14-43-37", + "split": "eval", + "split_group_id": "2025-01-11-14-43-37", + "recording_id": "2025-01-11-14-43-37", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-11-14-43-37_241_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-11-14-43-37", + "split": "eval", + "split_group_id": "2025-01-11-14-43-37", + "recording_id": "2025-01-11-14-43-37", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-11-14-43-37_438_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-11-14-43-37", + "split": "eval", + "split_group_id": "2025-01-11-14-43-37", + "recording_id": "2025-01-11-14-43-37", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. 
Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/2025-01-11-14-43-37_536_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-11-14-43-37", + "split": "eval", + "split_group_id": "2025-01-11-14-43-37", + "recording_id": "2025-01-11-14-43-37", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/2025-01-11-14-43-37_629_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-11-14-43-37", + "split": "eval", + "split_group_id": "2025-01-11-14-43-37", + "recording_id": "2025-01-11-14-43-37", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-11-14-43-37_11_hand.jpg", + "images/2025-01-11-14-43-37_20_hand.jpg", + "images/2025-01-11-14-43-37_30_hand.jpg", + "images/2025-01-11-14-43-37_39_hand.jpg", + "images/2025-01-11-14-43-37_48_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-11-14-43-37", + "split": "eval", + "split_group_id": "2025-01-11-14-43-37", + "recording_id": "2025-01-11-14-43-37", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-11-14-43-37_486_hand.jpg", + "images/2025-01-11-14-43-37_511_hand.jpg", + "images/2025-01-11-14-43-37_536_hand.jpg", + "images/2025-01-11-14-43-37_561_hand.jpg", + "images/2025-01-11-14-43-37_586_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-11-14-43-37", + "split": "eval", + "split_group_id": "2025-01-11-14-43-37", + "recording_id": "2025-01-11-14-43-37", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-11-14-43-37_622_hand.jpg", + "images/2025-01-11-14-43-37_626_hand.jpg", + "images/2025-01-11-14-43-37_629_hand.jpg", + "images/2025-01-11-14-43-37_632_hand.jpg", + "images/2025-01-11-14-43-37_636_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-11-14-43-37", + "split": "eval", + "split_group_id": "2025-01-11-14-43-37", + "recording_id": "2025-01-11-14-43-37", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this operation will fail" + } + ], + "images": [ + "images/2025-01-11-14-43-37_109_hand.jpg", + "images/2025-01-11-14-43-37_139_hand.jpg", + "images/2025-01-11-14-43-37_168_hand.jpg", + "images/2025-01-11-14-43-37_198_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-11-14-43-37", + "split": "eval", + "split_group_id": "2025-01-11-14-43-37", + "recording_id": "2025-01-11-14-43-37", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Grasp\nB. Lift\nC. Pull\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Grasp" + } + ], + "images": [ + "images/2025-01-11-14-43-37_235_hand.jpg", + "images/2025-01-11-14-43-37_238_hand.jpg", + "images/2025-01-11-14-43-37_241_hand.jpg", + "images/2025-01-11-14-43-37_244_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-11-14-43-37", + "split": "eval", + "split_group_id": "2025-01-11-14-43-37", + "recording_id": "2025-01-11-14-43-37", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Nudge\nB. Twist\nC. Lift\nD. Approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Lift" + } + ], + "images": [ + "images/2025-01-11-14-43-37_432_hand.jpg", + "images/2025-01-11-14-43-37_435_hand.jpg", + "images/2025-01-11-14-43-37_438_hand.jpg", + "images/2025-01-11-14-43-37_441_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-11-14-43-37", + "split": "eval", + "split_group_id": "2025-01-11-14-43-37", + "recording_id": "2025-01-11-14-43-37", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Pick square peg 3', what low-level action should happen next?\nOptions:\nA. Push\nB. Twist\nC. Nudge\nD. Lift\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Lift" + } + ], + "images": [ + "images/2025-01-11-14-43-37_298_hand.jpg", + "images/2025-01-11-14-43-37_301_hand.jpg", + "images/2025-01-11-14-43-37_304_hand.jpg", + "images/2025-01-11-14-43-37_307_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-11-14-43-37", + "split": "eval", + "split_group_id": "2025-01-11-14-43-37", + "recording_id": "2025-01-11-14-43-37", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. transfer\nC. pre-approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. pre-approach" + } + ], + "images": [ + "images/2025-01-13-11-18-57_30_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. release\nC. approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. contact" + } + ], + "images": [ + "images/2025-01-13-11-18-57_152_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. 
hold and carry\nC. contact\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. contact" + } + ], + "images": [ + "images/2025-01-13-11-18-57_344_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. approach\nC. release\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. contact" + } + ], + "images": [ + "images/2025-01-13-11-18-57_537_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. contact\nC. release\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. contact" + } + ], + "images": [ + "images/2025-01-13-11-18-57_641_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. contact\nC. 
approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. contact" + } + ], + "images": [ + "images/2025-01-13-11-18-57_666_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. pre-approach\nC. contact\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. transfer" + } + ], + "images": [ + "images/2025-01-13-11-18-57_763_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. approach\nC. release\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. release" + } + ], + "images": [ + "images/2025-01-13-11-18-57_854_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. transfer\nC. 
hold and carry\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. contact" + } + ], + "images": [ + "images/2025-01-13-11-18-57_951_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. approach\nC. transfer\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. contact" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1053_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. transfer\nC. hold and carry\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. transfer" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1116_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. approach\nC. contact\nD. 
pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. release" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1176_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. pre-approach\nC. contact\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. contact" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1255_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. contact\nC. release\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. contact" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1352_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. pre-approach\nC. contact\nD. 
transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. transfer" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1430_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. approach\nC. release\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. release" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1486_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. pre-approach\nC. transfer\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. contact" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1571_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. pre-approach\nC. hold and carry\nD. 
approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. contact" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1700_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. pre-approach\nC. transfer\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. transfer" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1804_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. hold and carry\nC. approach\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. release" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1864_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. approach\nC. contact\nD. 
pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. contact" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1972_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. hold and carry\nC. transfer\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. contact" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2175_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. hold and carry\nC. transfer\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. contact" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2287_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. hold and carry\nC. transfer\nD. 
contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. transfer" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2361_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. transfer\nC. pre-approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. release" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2431_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. hold and carry\nC. approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. contact" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2541_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. transfer\nC. hold and carry\nD. 
release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. contact" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2671_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. hold and carry\nC. transfer\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. transfer" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2749_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. release\nC. contact\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. release" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2807_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. approach\nC. transfer\nD. 
contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. contact" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2905_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. contact\nC. approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. contact" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3046_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. transfer\nC. pre-approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. transfer" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3157_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. pre-approach\nC. contact\nD. 
release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. release" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3222_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. hold and carry\nC. contact\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. contact" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3350_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. hold and carry\nC. pre-approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. contact" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3538_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. approach\nC. hold and carry\nD. 
transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. contact" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3625_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. transfer\nC. pre-approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. transfer" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3706_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. transfer\nC. contact\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. release" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3777_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. release\nC. approach\nD. 
pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. contact" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3924_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. hold and carry\nC. pre-approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. contact" + } + ], + "images": [ + "images/2025-01-13-11-18-57_4078_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. hold and carry\nC. approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. transfer" + } + ], + "images": [ + "images/2025-01-13-11-18-57_4152_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. transfer\nC. pre-approach\nD. 
release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. release" + } + ], + "images": [ + "images/2025-01-13-11-18-57_4235_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. contact\nC. release\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. contact" + } + ], + "images": [ + "images/2025-01-13-11-18-57_4416_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. contact\nC. hold and carry\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. contact" + } + ], + "images": [ + "images/2025-01-13-11-18-57_4628_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. approach\nC. transfer\nD. 
release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. transfer" + } + ], + "images": [ + "images/2025-01-13-11-18-57_4752_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. release\nC. contact\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. release" + } + ], + "images": [ + "images/2025-01-13-11-18-57_4835_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. approach\nC. pre-approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. pre-approach" + } + ], + "images": [ + "images/2025-01-13-11-18-57_4880_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. 
Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/2025-01-13-11-18-57_30_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-11-18-57_152_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-11-18-57_344_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. 
Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-11-18-57_537_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-11-18-57_641_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-11-18-57_666_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. 
No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/2025-01-13-11-18-57_763_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-11-18-57_854_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-11-18-57_951_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. 
No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1053_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1116_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1176_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. 
No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1255_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1352_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1430_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. 
No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1486_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1571_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1700_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. 
No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1804_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1864_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1972_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. 
Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2175_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2287_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2361_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. 
No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2431_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2541_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2671_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. 
Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2749_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2807_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2905_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. 
No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3046_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3157_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3222_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. 
Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3350_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3538_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3625_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. 
Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3706_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3777_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3924_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. 
Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-11-18-57_4078_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/2025-01-13-11-18-57_4152_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-11-18-57_4235_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. 
No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-11-18-57_4416_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-11-18-57_4628_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/2025-01-13-11-18-57_4752_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. 
No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-11-18-57_4835_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/2025-01-13-11-18-57_4880_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_12_hand.jpg", + "images/2025-01-13-11-18-57_21_hand.jpg", + "images/2025-01-13-11-18-57_30_hand.jpg", + "images/2025-01-13-11-18-57_40_hand.jpg", + "images/2025-01-13-11-18-57_49_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_281_hand.jpg", + "images/2025-01-13-11-18-57_313_hand.jpg", + "images/2025-01-13-11-18-57_344_hand.jpg", + "images/2025-01-13-11-18-57_376_hand.jpg", + "images/2025-01-13-11-18-57_408_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_633_hand.jpg", + "images/2025-01-13-11-18-57_637_hand.jpg", + "images/2025-01-13-11-18-57_641_hand.jpg", + "images/2025-01-13-11-18-57_645_hand.jpg", + "images/2025-01-13-11-18-57_649_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_710_hand.jpg", + "images/2025-01-13-11-18-57_736_hand.jpg", + "images/2025-01-13-11-18-57_763_hand.jpg", + "images/2025-01-13-11-18-57_790_hand.jpg", + "images/2025-01-13-11-18-57_816_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_850_hand.jpg", + "images/2025-01-13-11-18-57_852_hand.jpg", + "images/2025-01-13-11-18-57_854_hand.jpg", + "images/2025-01-13-11-18-57_856_hand.jpg", + "images/2025-01-13-11-18-57_858_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_895_hand.jpg", + "images/2025-01-13-11-18-57_923_hand.jpg", + "images/2025-01-13-11-18-57_952_hand.jpg", + "images/2025-01-13-11-18-57_980_hand.jpg", + "images/2025-01-13-11-18-57_1008_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1084_hand.jpg", + "images/2025-01-13-11-18-57_1100_hand.jpg", + "images/2025-01-13-11-18-57_1116_hand.jpg", + "images/2025-01-13-11-18-57_1132_hand.jpg", + "images/2025-01-13-11-18-57_1148_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1170_hand.jpg", + "images/2025-01-13-11-18-57_1173_hand.jpg", + "images/2025-01-13-11-18-57_1176_hand.jpg", + "images/2025-01-13-11-18-57_1179_hand.jpg", + "images/2025-01-13-11-18-57_1182_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1212_hand.jpg", + "images/2025-01-13-11-18-57_1233_hand.jpg", + "images/2025-01-13-11-18-57_1255_hand.jpg", + "images/2025-01-13-11-18-57_1277_hand.jpg", + "images/2025-01-13-11-18-57_1298_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1398_hand.jpg", + "images/2025-01-13-11-18-57_1414_hand.jpg", + "images/2025-01-13-11-18-57_1430_hand.jpg", + "images/2025-01-13-11-18-57_1446_hand.jpg", + "images/2025-01-13-11-18-57_1462_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1483_hand.jpg", + "images/2025-01-13-11-18-57_1485_hand.jpg", + "images/2025-01-13-11-18-57_1486_hand.jpg", + "images/2025-01-13-11-18-57_1488_hand.jpg", + "images/2025-01-13-11-18-57_1490_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1522_hand.jpg", + "images/2025-01-13-11-18-57_1546_hand.jpg", + "images/2025-01-13-11-18-57_1571_hand.jpg", + "images/2025-01-13-11-18-57_1596_hand.jpg", + "images/2025-01-13-11-18-57_1620_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1669_hand.jpg", + "images/2025-01-13-11-18-57_1685_hand.jpg", + "images/2025-01-13-11-18-57_1700_hand.jpg", + "images/2025-01-13-11-18-57_1716_hand.jpg", + "images/2025-01-13-11-18-57_1732_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1772_hand.jpg", + "images/2025-01-13-11-18-57_1788_hand.jpg", + "images/2025-01-13-11-18-57_1804_hand.jpg", + "images/2025-01-13-11-18-57_1821_hand.jpg", + "images/2025-01-13-11-18-57_1837_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1860_hand.jpg", + "images/2025-01-13-11-18-57_1862_hand.jpg", + "images/2025-01-13-11-18-57_1864_hand.jpg", + "images/2025-01-13-11-18-57_1866_hand.jpg", + "images/2025-01-13-11-18-57_1868_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1909_hand.jpg", + "images/2025-01-13-11-18-57_1941_hand.jpg", + "images/2025-01-13-11-18-57_1972_hand.jpg", + "images/2025-01-13-11-18-57_2004_hand.jpg", + "images/2025-01-13-11-18-57_2036_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2111_hand.jpg", + "images/2025-01-13-11-18-57_2143_hand.jpg", + "images/2025-01-13-11-18-57_2175_hand.jpg", + "images/2025-01-13-11-18-57_2207_hand.jpg", + "images/2025-01-13-11-18-57_2239_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2322_hand.jpg", + "images/2025-01-13-11-18-57_2342_hand.jpg", + "images/2025-01-13-11-18-57_2362_hand.jpg", + "images/2025-01-13-11-18-57_2381_hand.jpg", + "images/2025-01-13-11-18-57_2401_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2428_hand.jpg", + "images/2025-01-13-11-18-57_2430_hand.jpg", + "images/2025-01-13-11-18-57_2432_hand.jpg", + "images/2025-01-13-11-18-57_2433_hand.jpg", + "images/2025-01-13-11-18-57_2435_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2720_hand.jpg", + "images/2025-01-13-11-18-57_2735_hand.jpg", + "images/2025-01-13-11-18-57_2750_hand.jpg", + "images/2025-01-13-11-18-57_2764_hand.jpg", + "images/2025-01-13-11-18-57_2779_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2802_hand.jpg", + "images/2025-01-13-11-18-57_2805_hand.jpg", + "images/2025-01-13-11-18-57_2808_hand.jpg", + "images/2025-01-13-11-18-57_2810_hand.jpg", + "images/2025-01-13-11-18-57_2813_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2850_hand.jpg", + "images/2025-01-13-11-18-57_2878_hand.jpg", + "images/2025-01-13-11-18-57_2905_hand.jpg", + "images/2025-01-13-11-18-57_2932_hand.jpg", + "images/2025-01-13-11-18-57_2960_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3012_hand.jpg", + "images/2025-01-13-11-18-57_3029_hand.jpg", + "images/2025-01-13-11-18-57_3046_hand.jpg", + "images/2025-01-13-11-18-57_3063_hand.jpg", + "images/2025-01-13-11-18-57_3080_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3122_hand.jpg", + "images/2025-01-13-11-18-57_3140_hand.jpg", + "images/2025-01-13-11-18-57_3157_hand.jpg", + "images/2025-01-13-11-18-57_3174_hand.jpg", + "images/2025-01-13-11-18-57_3192_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3217_hand.jpg", + "images/2025-01-13-11-18-57_3220_hand.jpg", + "images/2025-01-13-11-18-57_3222_hand.jpg", + "images/2025-01-13-11-18-57_3224_hand.jpg", + "images/2025-01-13-11-18-57_3227_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3670_hand.jpg", + "images/2025-01-13-11-18-57_3688_hand.jpg", + "images/2025-01-13-11-18-57_3706_hand.jpg", + "images/2025-01-13-11-18-57_3724_hand.jpg", + "images/2025-01-13-11-18-57_3742_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3769_hand.jpg", + "images/2025-01-13-11-18-57_3773_hand.jpg", + "images/2025-01-13-11-18-57_3777_hand.jpg", + "images/2025-01-13-11-18-57_3781_hand.jpg", + "images/2025-01-13-11-18-57_3785_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3839_hand.jpg", + "images/2025-01-13-11-18-57_3882_hand.jpg", + "images/2025-01-13-11-18-57_3924_hand.jpg", + "images/2025-01-13-11-18-57_3966_hand.jpg", + "images/2025-01-13-11-18-57_4009_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_4119_hand.jpg", + "images/2025-01-13-11-18-57_4136_hand.jpg", + "images/2025-01-13-11-18-57_4152_hand.jpg", + "images/2025-01-13-11-18-57_4169_hand.jpg", + "images/2025-01-13-11-18-57_4186_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_4218_hand.jpg", + "images/2025-01-13-11-18-57_4226_hand.jpg", + "images/2025-01-13-11-18-57_4235_hand.jpg", + "images/2025-01-13-11-18-57_4244_hand.jpg", + "images/2025-01-13-11-18-57_4252_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_4320_hand.jpg", + "images/2025-01-13-11-18-57_4368_hand.jpg", + "images/2025-01-13-11-18-57_4416_hand.jpg", + "images/2025-01-13-11-18-57_4464_hand.jpg", + "images/2025-01-13-11-18-57_4512_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_4591_hand.jpg", + "images/2025-01-13-11-18-57_4609_hand.jpg", + "images/2025-01-13-11-18-57_4628_hand.jpg", + "images/2025-01-13-11-18-57_4647_hand.jpg", + "images/2025-01-13-11-18-57_4665_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_4712_hand.jpg", + "images/2025-01-13-11-18-57_4732_hand.jpg", + "images/2025-01-13-11-18-57_4752_hand.jpg", + "images/2025-01-13-11-18-57_4773_hand.jpg", + "images/2025-01-13-11-18-57_4793_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_4865_hand.jpg", + "images/2025-01-13-11-18-57_4873_hand.jpg", + "images/2025-01-13-11-18-57_4880_hand.jpg", + "images/2025-01-13-11-18-57_4888_hand.jpg", + "images/2025-01-13-11-18-57_4896_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — this operation will fail" + } + ], + "images": [ + "images/2025-01-13-11-18-57_109_hand.jpg", + "images/2025-01-13-11-18-57_138_hand.jpg", + "images/2025-01-13-11-18-57_167_hand.jpg", + "images/2025-01-13-11-18-57_195_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-11-18-57_475_hand.jpg", + "images/2025-01-13-11-18-57_492_hand.jpg", + "images/2025-01-13-11-18-57_510_hand.jpg", + "images/2025-01-13-11-18-57_527_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-11-18-57_701_hand.jpg", + "images/2025-01-13-11-18-57_715_hand.jpg", + "images/2025-01-13-11-18-57_728_hand.jpg", + "images/2025-01-13-11-18-57_742_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-11-18-57_887_hand.jpg", + "images/2025-01-13-11-18-57_902_hand.jpg", + "images/2025-01-13-11-18-57_917_hand.jpg", + "images/2025-01-13-11-18-57_933_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1081_hand.jpg", + "images/2025-01-13-11-18-57_1090_hand.jpg", + "images/2025-01-13-11-18-57_1099_hand.jpg", + "images/2025-01-13-11-18-57_1108_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1210_hand.jpg", + "images/2025-01-13-11-18-57_1225_hand.jpg", + "images/2025-01-13-11-18-57_1239_hand.jpg", + "images/2025-01-13-11-18-57_1254_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1394_hand.jpg", + "images/2025-01-13-11-18-57_1402_hand.jpg", + "images/2025-01-13-11-18-57_1411_hand.jpg", + "images/2025-01-13-11-18-57_1419_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1525_hand.jpg", + "images/2025-01-13-11-18-57_1545_hand.jpg", + "images/2025-01-13-11-18-57_1564_hand.jpg", + "images/2025-01-13-11-18-57_1583_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1768_hand.jpg", + "images/2025-01-13-11-18-57_1777_hand.jpg", + "images/2025-01-13-11-18-57_1786_hand.jpg", + "images/2025-01-13-11-18-57_1794_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1925_hand.jpg", + "images/2025-01-13-11-18-57_1957_hand.jpg", + "images/2025-01-13-11-18-57_1989_hand.jpg", + "images/2025-01-13-11-18-57_2021_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2316_hand.jpg", + "images/2025-01-13-11-18-57_2327_hand.jpg", + "images/2025-01-13-11-18-57_2337_hand.jpg", + "images/2025-01-13-11-18-57_2348_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2472_hand.jpg", + "images/2025-01-13-11-18-57_2491_hand.jpg", + "images/2025-01-13-11-18-57_2511_hand.jpg", + "images/2025-01-13-11-18-57_2530_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2716_hand.jpg", + "images/2025-01-13-11-18-57_2725_hand.jpg", + "images/2025-01-13-11-18-57_2733_hand.jpg", + "images/2025-01-13-11-18-57_2742_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2852_hand.jpg", + "images/2025-01-13-11-18-57_2874_hand.jpg", + "images/2025-01-13-11-18-57_2895_hand.jpg", + "images/2025-01-13-11-18-57_2916_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3117_hand.jpg", + "images/2025-01-13-11-18-57_3127_hand.jpg", + "images/2025-01-13-11-18-57_3136_hand.jpg", + "images/2025-01-13-11-18-57_3146_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this operation will fail" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3260_hand.jpg", + "images/2025-01-13-11-18-57_3278_hand.jpg", + "images/2025-01-13-11-18-57_3296_hand.jpg", + "images/2025-01-13-11-18-57_3314_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3493_hand.jpg", + "images/2025-01-13-11-18-57_3507_hand.jpg", + "images/2025-01-13-11-18-57_3520_hand.jpg", + "images/2025-01-13-11-18-57_3533_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3665_hand.jpg", + "images/2025-01-13-11-18-57_3676_hand.jpg", + "images/2025-01-13-11-18-57_3687_hand.jpg", + "images/2025-01-13-11-18-57_3697_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3828_hand.jpg", + "images/2025-01-13-11-18-57_3851_hand.jpg", + "images/2025-01-13-11-18-57_3874_hand.jpg", + "images/2025-01-13-11-18-57_3897_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-11-18-57_4119_hand.jpg", + "images/2025-01-13-11-18-57_4132_hand.jpg", + "images/2025-01-13-11-18-57_4144_hand.jpg", + "images/2025-01-13-11-18-57_4156_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-11-18-57_4317_hand.jpg", + "images/2025-01-13-11-18-57_4348_hand.jpg", + "images/2025-01-13-11-18-57_4380_hand.jpg", + "images/2025-01-13-11-18-57_4412_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-11-18-57_4708_hand.jpg", + "images/2025-01-13-11-18-57_4721_hand.jpg", + "images/2025-01-13-11-18-57_4733_hand.jpg", + "images/2025-01-13-11-18-57_4746_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/2025-01-13-11-18-57_963_hand.jpg", + "images/2025-01-13-11-18-57_562_hand.jpg", + "images/2025-01-13-11-18-57_769_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1125_hand.jpg", + "images/2025-01-13-11-18-57_769_hand.jpg", + "images/2025-01-13-11-18-57_963_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1282_hand.jpg", + "images/2025-01-13-11-18-57_963_hand.jpg", + "images/2025-01-13-11-18-57_1125_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1125_hand.jpg", + "images/2025-01-13-11-18-57_1436_hand.jpg", + "images/2025-01-13-11-18-57_1282_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1436_hand.jpg", + "images/2025-01-13-11-18-57_1622_hand.jpg", + "images/2025-01-13-11-18-57_1282_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1812_hand.jpg", + "images/2025-01-13-11-18-57_1436_hand.jpg", + "images/2025-01-13-11-18-57_1622_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1622_hand.jpg", + "images/2025-01-13-11-18-57_1812_hand.jpg", + "images/2025-01-13-11-18-57_2085_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1812_hand.jpg", + "images/2025-01-13-11-18-57_2368_hand.jpg", + "images/2025-01-13-11-18-57_2085_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2569_hand.jpg", + "images/2025-01-13-11-18-57_2085_hand.jpg", + "images/2025-01-13-11-18-57_2368_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2569_hand.jpg", + "images/2025-01-13-11-18-57_2368_hand.jpg", + "images/2025-01-13-11-18-57_2759_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2958_hand.jpg", + "images/2025-01-13-11-18-57_2569_hand.jpg", + "images/2025-01-13-11-18-57_2759_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3165_hand.jpg", + "images/2025-01-13-11-18-57_2759_hand.jpg", + "images/2025-01-13-11-18-57_2958_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2958_hand.jpg", + "images/2025-01-13-11-18-57_3559_hand.jpg", + "images/2025-01-13-11-18-57_3165_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3165_hand.jpg", + "images/2025-01-13-11-18-57_3559_hand.jpg", + "images/2025-01-13-11-18-57_3718_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3943_hand.jpg", + "images/2025-01-13-11-18-57_3718_hand.jpg", + "images/2025-01-13-11-18-57_3559_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3943_hand.jpg", + "images/2025-01-13-11-18-57_4181_hand.jpg", + "images/2025-01-13-11-18-57_3718_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3943_hand.jpg", + "images/2025-01-13-11-18-57_4475_hand.jpg", + "images/2025-01-13-11-18-57_4181_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/2025-01-13-11-18-57_4181_hand.jpg", + "images/2025-01-13-11-18-57_4475_hand.jpg", + "images/2025-01-13-11-18-57_4770_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-11-18-57_769_hand.jpg", + "images/2025-01-13-11-18-57_562_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-11-18-57_769_hand.jpg", + "images/2025-01-13-11-18-57_963_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-11-18-57_963_hand.jpg", + "images/2025-01-13-11-18-57_1125_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1125_hand.jpg", + "images/2025-01-13-11-18-57_1282_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1436_hand.jpg", + "images/2025-01-13-11-18-57_1282_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1436_hand.jpg", + "images/2025-01-13-11-18-57_1622_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1622_hand.jpg", + "images/2025-01-13-11-18-57_1812_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2085_hand.jpg", + "images/2025-01-13-11-18-57_1812_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2085_hand.jpg", + "images/2025-01-13-11-18-57_2368_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2368_hand.jpg", + "images/2025-01-13-11-18-57_2569_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2759_hand.jpg", + "images/2025-01-13-11-18-57_2569_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2958_hand.jpg", + "images/2025-01-13-11-18-57_2759_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3165_hand.jpg", + "images/2025-01-13-11-18-57_2958_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3559_hand.jpg", + "images/2025-01-13-11-18-57_3165_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3559_hand.jpg", + "images/2025-01-13-11-18-57_3718_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3943_hand.jpg", + "images/2025-01-13-11-18-57_3718_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3943_hand.jpg", + "images/2025-01-13-11-18-57_4181_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-11-18-57_4475_hand.jpg", + "images/2025-01-13-11-18-57_4181_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-11-18-57_4770_hand.jpg", + "images/2025-01-13-11-18-57_4475_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-11-18-57_562_hand.jpg", + "images/2025-01-13-11-18-57_963_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-11-18-57_769_hand.jpg", + "images/2025-01-13-11-18-57_1125_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1282_hand.jpg", + "images/2025-01-13-11-18-57_963_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1436_hand.jpg", + "images/2025-01-13-11-18-57_1125_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1282_hand.jpg", + "images/2025-01-13-11-18-57_1622_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1812_hand.jpg", + "images/2025-01-13-11-18-57_1436_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2085_hand.jpg", + "images/2025-01-13-11-18-57_1622_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2368_hand.jpg", + "images/2025-01-13-11-18-57_1812_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2569_hand.jpg", + "images/2025-01-13-11-18-57_2085_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2759_hand.jpg", + "images/2025-01-13-11-18-57_2368_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2569_hand.jpg", + "images/2025-01-13-11-18-57_2958_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3165_hand.jpg", + "images/2025-01-13-11-18-57_2759_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3559_hand.jpg", + "images/2025-01-13-11-18-57_2958_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3165_hand.jpg", + "images/2025-01-13-11-18-57_3718_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3943_hand.jpg", + "images/2025-01-13-11-18-57_3559_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3718_hand.jpg", + "images/2025-01-13-11-18-57_4181_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3943_hand.jpg", + "images/2025-01-13-11-18-57_4475_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-11-18-57_4181_hand.jpg", + "images/2025-01-13-11-18-57_4770_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_516_hand.jpg", + "images/2025-01-13-11-18-57_519_hand.jpg", + "images/2025-01-13-11-18-57_522_hand.jpg", + "images/2025-01-13-11-18-57_525_hand.jpg", + "images/2025-01-13-11-18-57_528_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_556_hand.jpg", + "images/2025-01-13-11-18-57_559_hand.jpg", + "images/2025-01-13-11-18-57_562_hand.jpg", + "images/2025-01-13-11-18-57_565_hand.jpg", + "images/2025-01-13-11-18-57_568_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_596_hand.jpg", + "images/2025-01-13-11-18-57_599_hand.jpg", + "images/2025-01-13-11-18-57_602_hand.jpg", + "images/2025-01-13-11-18-57_605_hand.jpg", + "images/2025-01-13-11-18-57_608_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_732_hand.jpg", + "images/2025-01-13-11-18-57_735_hand.jpg", + "images/2025-01-13-11-18-57_738_hand.jpg", + "images/2025-01-13-11-18-57_741_hand.jpg", + "images/2025-01-13-11-18-57_744_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_764_hand.jpg", + "images/2025-01-13-11-18-57_767_hand.jpg", + "images/2025-01-13-11-18-57_770_hand.jpg", + "images/2025-01-13-11-18-57_773_hand.jpg", + "images/2025-01-13-11-18-57_776_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_795_hand.jpg", + "images/2025-01-13-11-18-57_798_hand.jpg", + "images/2025-01-13-11-18-57_801_hand.jpg", + "images/2025-01-13-11-18-57_804_hand.jpg", + "images/2025-01-13-11-18-57_807_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_922_hand.jpg", + "images/2025-01-13-11-18-57_925_hand.jpg", + "images/2025-01-13-11-18-57_928_hand.jpg", + "images/2025-01-13-11-18-57_931_hand.jpg", + "images/2025-01-13-11-18-57_934_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_957_hand.jpg", + "images/2025-01-13-11-18-57_960_hand.jpg", + "images/2025-01-13-11-18-57_963_hand.jpg", + "images/2025-01-13-11-18-57_966_hand.jpg", + "images/2025-01-13-11-18-57_969_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_992_hand.jpg", + "images/2025-01-13-11-18-57_995_hand.jpg", + "images/2025-01-13-11-18-57_998_hand.jpg", + "images/2025-01-13-11-18-57_1001_hand.jpg", + "images/2025-01-13-11-18-57_1004_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1099_hand.jpg", + "images/2025-01-13-11-18-57_1102_hand.jpg", + "images/2025-01-13-11-18-57_1105_hand.jpg", + "images/2025-01-13-11-18-57_1108_hand.jpg", + "images/2025-01-13-11-18-57_1111_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1120_hand.jpg", + "images/2025-01-13-11-18-57_1123_hand.jpg", + "images/2025-01-13-11-18-57_1126_hand.jpg", + "images/2025-01-13-11-18-57_1129_hand.jpg", + "images/2025-01-13-11-18-57_1132_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1140_hand.jpg", + "images/2025-01-13-11-18-57_1143_hand.jpg", + "images/2025-01-13-11-18-57_1146_hand.jpg", + "images/2025-01-13-11-18-57_1149_hand.jpg", + "images/2025-01-13-11-18-57_1152_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1243_hand.jpg", + "images/2025-01-13-11-18-57_1246_hand.jpg", + "images/2025-01-13-11-18-57_1249_hand.jpg", + "images/2025-01-13-11-18-57_1252_hand.jpg", + "images/2025-01-13-11-18-57_1255_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1276_hand.jpg", + "images/2025-01-13-11-18-57_1279_hand.jpg", + "images/2025-01-13-11-18-57_1282_hand.jpg", + "images/2025-01-13-11-18-57_1285_hand.jpg", + "images/2025-01-13-11-18-57_1288_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1310_hand.jpg", + "images/2025-01-13-11-18-57_1313_hand.jpg", + "images/2025-01-13-11-18-57_1316_hand.jpg", + "images/2025-01-13-11-18-57_1319_hand.jpg", + "images/2025-01-13-11-18-57_1322_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1410_hand.jpg", + "images/2025-01-13-11-18-57_1413_hand.jpg", + "images/2025-01-13-11-18-57_1416_hand.jpg", + "images/2025-01-13-11-18-57_1419_hand.jpg", + "images/2025-01-13-11-18-57_1422_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1430_hand.jpg", + "images/2025-01-13-11-18-57_1433_hand.jpg", + "images/2025-01-13-11-18-57_1436_hand.jpg", + "images/2025-01-13-11-18-57_1439_hand.jpg", + "images/2025-01-13-11-18-57_1442_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1450_hand.jpg", + "images/2025-01-13-11-18-57_1453_hand.jpg", + "images/2025-01-13-11-18-57_1456_hand.jpg", + "images/2025-01-13-11-18-57_1459_hand.jpg", + "images/2025-01-13-11-18-57_1462_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1571_hand.jpg", + "images/2025-01-13-11-18-57_1574_hand.jpg", + "images/2025-01-13-11-18-57_1577_hand.jpg", + "images/2025-01-13-11-18-57_1580_hand.jpg", + "images/2025-01-13-11-18-57_1583_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1616_hand.jpg", + "images/2025-01-13-11-18-57_1619_hand.jpg", + "images/2025-01-13-11-18-57_1622_hand.jpg", + "images/2025-01-13-11-18-57_1625_hand.jpg", + "images/2025-01-13-11-18-57_1628_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1661_hand.jpg", + "images/2025-01-13-11-18-57_1664_hand.jpg", + "images/2025-01-13-11-18-57_1667_hand.jpg", + "images/2025-01-13-11-18-57_1670_hand.jpg", + "images/2025-01-13-11-18-57_1673_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1786_hand.jpg", + "images/2025-01-13-11-18-57_1789_hand.jpg", + "images/2025-01-13-11-18-57_1792_hand.jpg", + "images/2025-01-13-11-18-57_1795_hand.jpg", + "images/2025-01-13-11-18-57_1798_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1806_hand.jpg", + "images/2025-01-13-11-18-57_1809_hand.jpg", + "images/2025-01-13-11-18-57_1812_hand.jpg", + "images/2025-01-13-11-18-57_1815_hand.jpg", + "images/2025-01-13-11-18-57_1818_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1826_hand.jpg", + "images/2025-01-13-11-18-57_1829_hand.jpg", + "images/2025-01-13-11-18-57_1832_hand.jpg", + "images/2025-01-13-11-18-57_1835_hand.jpg", + "images/2025-01-13-11-18-57_1838_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2004_hand.jpg", + "images/2025-01-13-11-18-57_2007_hand.jpg", + "images/2025-01-13-11-18-57_2010_hand.jpg", + "images/2025-01-13-11-18-57_2013_hand.jpg", + "images/2025-01-13-11-18-57_2016_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2079_hand.jpg", + "images/2025-01-13-11-18-57_2082_hand.jpg", + "images/2025-01-13-11-18-57_2085_hand.jpg", + "images/2025-01-13-11-18-57_2088_hand.jpg", + "images/2025-01-13-11-18-57_2091_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2154_hand.jpg", + "images/2025-01-13-11-18-57_2157_hand.jpg", + "images/2025-01-13-11-18-57_2160_hand.jpg", + "images/2025-01-13-11-18-57_2163_hand.jpg", + "images/2025-01-13-11-18-57_2166_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2338_hand.jpg", + "images/2025-01-13-11-18-57_2341_hand.jpg", + "images/2025-01-13-11-18-57_2344_hand.jpg", + "images/2025-01-13-11-18-57_2347_hand.jpg", + "images/2025-01-13-11-18-57_2350_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2362_hand.jpg", + "images/2025-01-13-11-18-57_2365_hand.jpg", + "images/2025-01-13-11-18-57_2368_hand.jpg", + "images/2025-01-13-11-18-57_2371_hand.jpg", + "images/2025-01-13-11-18-57_2374_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2387_hand.jpg", + "images/2025-01-13-11-18-57_2390_hand.jpg", + "images/2025-01-13-11-18-57_2393_hand.jpg", + "images/2025-01-13-11-18-57_2396_hand.jpg", + "images/2025-01-13-11-18-57_2399_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2518_hand.jpg", + "images/2025-01-13-11-18-57_2521_hand.jpg", + "images/2025-01-13-11-18-57_2524_hand.jpg", + "images/2025-01-13-11-18-57_2527_hand.jpg", + "images/2025-01-13-11-18-57_2530_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2564_hand.jpg", + "images/2025-01-13-11-18-57_2567_hand.jpg", + "images/2025-01-13-11-18-57_2570_hand.jpg", + "images/2025-01-13-11-18-57_2573_hand.jpg", + "images/2025-01-13-11-18-57_2576_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2609_hand.jpg", + "images/2025-01-13-11-18-57_2612_hand.jpg", + "images/2025-01-13-11-18-57_2615_hand.jpg", + "images/2025-01-13-11-18-57_2618_hand.jpg", + "images/2025-01-13-11-18-57_2621_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2733_hand.jpg", + "images/2025-01-13-11-18-57_2736_hand.jpg", + "images/2025-01-13-11-18-57_2739_hand.jpg", + "images/2025-01-13-11-18-57_2742_hand.jpg", + "images/2025-01-13-11-18-57_2745_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2754_hand.jpg", + "images/2025-01-13-11-18-57_2757_hand.jpg", + "images/2025-01-13-11-18-57_2760_hand.jpg", + "images/2025-01-13-11-18-57_2763_hand.jpg", + "images/2025-01-13-11-18-57_2766_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2774_hand.jpg", + "images/2025-01-13-11-18-57_2777_hand.jpg", + "images/2025-01-13-11-18-57_2780_hand.jpg", + "images/2025-01-13-11-18-57_2783_hand.jpg", + "images/2025-01-13-11-18-57_2786_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2903_hand.jpg", + "images/2025-01-13-11-18-57_2906_hand.jpg", + "images/2025-01-13-11-18-57_2909_hand.jpg", + "images/2025-01-13-11-18-57_2912_hand.jpg", + "images/2025-01-13-11-18-57_2915_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2952_hand.jpg", + "images/2025-01-13-11-18-57_2955_hand.jpg", + "images/2025-01-13-11-18-57_2958_hand.jpg", + "images/2025-01-13-11-18-57_2961_hand.jpg", + "images/2025-01-13-11-18-57_2964_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3002_hand.jpg", + "images/2025-01-13-11-18-57_3005_hand.jpg", + "images/2025-01-13-11-18-57_3008_hand.jpg", + "images/2025-01-13-11-18-57_3011_hand.jpg", + "images/2025-01-13-11-18-57_3014_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3137_hand.jpg", + "images/2025-01-13-11-18-57_3140_hand.jpg", + "images/2025-01-13-11-18-57_3143_hand.jpg", + "images/2025-01-13-11-18-57_3146_hand.jpg", + "images/2025-01-13-11-18-57_3149_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3160_hand.jpg", + "images/2025-01-13-11-18-57_3163_hand.jpg", + "images/2025-01-13-11-18-57_3166_hand.jpg", + "images/2025-01-13-11-18-57_3169_hand.jpg", + "images/2025-01-13-11-18-57_3172_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3182_hand.jpg", + "images/2025-01-13-11-18-57_3185_hand.jpg", + "images/2025-01-13-11-18-57_3188_hand.jpg", + "images/2025-01-13-11-18-57_3191_hand.jpg", + "images/2025-01-13-11-18-57_3194_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3522_hand.jpg", + "images/2025-01-13-11-18-57_3525_hand.jpg", + "images/2025-01-13-11-18-57_3528_hand.jpg", + "images/2025-01-13-11-18-57_3531_hand.jpg", + "images/2025-01-13-11-18-57_3534_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3553_hand.jpg", + "images/2025-01-13-11-18-57_3556_hand.jpg", + "images/2025-01-13-11-18-57_3559_hand.jpg", + "images/2025-01-13-11-18-57_3562_hand.jpg", + "images/2025-01-13-11-18-57_3565_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3584_hand.jpg", + "images/2025-01-13-11-18-57_3587_hand.jpg", + "images/2025-01-13-11-18-57_3590_hand.jpg", + "images/2025-01-13-11-18-57_3593_hand.jpg", + "images/2025-01-13-11-18-57_3596_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3688_hand.jpg", + "images/2025-01-13-11-18-57_3691_hand.jpg", + "images/2025-01-13-11-18-57_3694_hand.jpg", + "images/2025-01-13-11-18-57_3697_hand.jpg", + "images/2025-01-13-11-18-57_3700_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3712_hand.jpg", + "images/2025-01-13-11-18-57_3715_hand.jpg", + "images/2025-01-13-11-18-57_3718_hand.jpg", + "images/2025-01-13-11-18-57_3721_hand.jpg", + "images/2025-01-13-11-18-57_3724_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3737_hand.jpg", + "images/2025-01-13-11-18-57_3740_hand.jpg", + "images/2025-01-13-11-18-57_3743_hand.jpg", + "images/2025-01-13-11-18-57_3746_hand.jpg", + "images/2025-01-13-11-18-57_3749_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3884_hand.jpg", + "images/2025-01-13-11-18-57_3887_hand.jpg", + "images/2025-01-13-11-18-57_3890_hand.jpg", + "images/2025-01-13-11-18-57_3893_hand.jpg", + "images/2025-01-13-11-18-57_3896_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3938_hand.jpg", + "images/2025-01-13-11-18-57_3941_hand.jpg", + "images/2025-01-13-11-18-57_3944_hand.jpg", + "images/2025-01-13-11-18-57_3947_hand.jpg", + "images/2025-01-13-11-18-57_3950_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3991_hand.jpg", + "images/2025-01-13-11-18-57_3994_hand.jpg", + "images/2025-01-13-11-18-57_3997_hand.jpg", + "images/2025-01-13-11-18-57_4000_hand.jpg", + "images/2025-01-13-11-18-57_4003_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_4146_hand.jpg", + "images/2025-01-13-11-18-57_4149_hand.jpg", + "images/2025-01-13-11-18-57_4152_hand.jpg", + "images/2025-01-13-11-18-57_4155_hand.jpg", + "images/2025-01-13-11-18-57_4158_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_4175_hand.jpg", + "images/2025-01-13-11-18-57_4178_hand.jpg", + "images/2025-01-13-11-18-57_4181_hand.jpg", + "images/2025-01-13-11-18-57_4184_hand.jpg", + "images/2025-01-13-11-18-57_4187_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_4204_hand.jpg", + "images/2025-01-13-11-18-57_4207_hand.jpg", + "images/2025-01-13-11-18-57_4210_hand.jpg", + "images/2025-01-13-11-18-57_4213_hand.jpg", + "images/2025-01-13-11-18-57_4216_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_4396_hand.jpg", + "images/2025-01-13-11-18-57_4399_hand.jpg", + "images/2025-01-13-11-18-57_4402_hand.jpg", + "images/2025-01-13-11-18-57_4405_hand.jpg", + "images/2025-01-13-11-18-57_4408_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_4470_hand.jpg", + "images/2025-01-13-11-18-57_4473_hand.jpg", + "images/2025-01-13-11-18-57_4476_hand.jpg", + "images/2025-01-13-11-18-57_4479_hand.jpg", + "images/2025-01-13-11-18-57_4482_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_4543_hand.jpg", + "images/2025-01-13-11-18-57_4546_hand.jpg", + "images/2025-01-13-11-18-57_4549_hand.jpg", + "images/2025-01-13-11-18-57_4552_hand.jpg", + "images/2025-01-13-11-18-57_4555_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_4736_hand.jpg", + "images/2025-01-13-11-18-57_4739_hand.jpg", + "images/2025-01-13-11-18-57_4742_hand.jpg", + "images/2025-01-13-11-18-57_4745_hand.jpg", + "images/2025-01-13-11-18-57_4748_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_4764_hand.jpg", + "images/2025-01-13-11-18-57_4767_hand.jpg", + "images/2025-01-13-11-18-57_4770_hand.jpg", + "images/2025-01-13-11-18-57_4773_hand.jpg", + "images/2025-01-13-11-18-57_4776_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-11-18-57_4793_hand.jpg", + "images/2025-01-13-11-18-57_4796_hand.jpg", + "images/2025-01-13-11-18-57_4799_hand.jpg", + "images/2025-01-13-11-18-57_4802_hand.jpg", + "images/2025-01-13-11-18-57_4805_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Push\nB. Grasp\nC. Release\nD. 
Approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Grasp" + } + ], + "images": [ + "images/2025-01-13-11-18-57_146_hand.jpg", + "images/2025-01-13-11-18-57_149_hand.jpg", + "images/2025-01-13-11-18-57_152_hand.jpg", + "images/2025-01-13-11-18-57_155_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Twist\nB. Lift\nC. Push\nD. Approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Twist" + } + ], + "images": [ + "images/2025-01-13-11-18-57_338_hand.jpg", + "images/2025-01-13-11-18-57_341_hand.jpg", + "images/2025-01-13-11-18-57_344_hand.jpg", + "images/2025-01-13-11-18-57_347_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Approach\nB. Nudge\nC. Align\nD. Grasp\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Grasp" + } + ], + "images": [ + "images/2025-01-13-11-18-57_531_hand.jpg", + "images/2025-01-13-11-18-57_534_hand.jpg", + "images/2025-01-13-11-18-57_537_hand.jpg", + "images/2025-01-13-11-18-57_540_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Pull\nB. Grasp\nC. Lift\nD. Twist\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Twist" + } + ], + "images": [ + "images/2025-01-13-11-18-57_635_hand.jpg", + "images/2025-01-13-11-18-57_638_hand.jpg", + "images/2025-01-13-11-18-57_641_hand.jpg", + "images/2025-01-13-11-18-57_644_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Release\nB. Pull\nC. Align\nD. Approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Pull" + } + ], + "images": [ + "images/2025-01-13-11-18-57_660_hand.jpg", + "images/2025-01-13-11-18-57_663_hand.jpg", + "images/2025-01-13-11-18-57_666_hand.jpg", + "images/2025-01-13-11-18-57_669_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Twist\nB. Approach\nC. Pull\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Approach" + } + ], + "images": [ + "images/2025-01-13-11-18-57_757_hand.jpg", + "images/2025-01-13-11-18-57_760_hand.jpg", + "images/2025-01-13-11-18-57_763_hand.jpg", + "images/2025-01-13-11-18-57_766_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Push\nB. Twist\nC. Release\nD. Grasp\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Release" + } + ], + "images": [ + "images/2025-01-13-11-18-57_848_hand.jpg", + "images/2025-01-13-11-18-57_851_hand.jpg", + "images/2025-01-13-11-18-57_854_hand.jpg", + "images/2025-01-13-11-18-57_857_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Nudge\nB. Grasp\nC. Pull\nD. Approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Grasp" + } + ], + "images": [ + "images/2025-01-13-11-18-57_946_hand.jpg", + "images/2025-01-13-11-18-57_949_hand.jpg", + "images/2025-01-13-11-18-57_952_hand.jpg", + "images/2025-01-13-11-18-57_955_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Approach\nB. Nudge\nC. Pull\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Pull" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1047_hand.jpg", + "images/2025-01-13-11-18-57_1050_hand.jpg", + "images/2025-01-13-11-18-57_1053_hand.jpg", + "images/2025-01-13-11-18-57_1056_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Approach\nB. Twist\nC. Lift\nD. Nudge\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Approach" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1110_hand.jpg", + "images/2025-01-13-11-18-57_1113_hand.jpg", + "images/2025-01-13-11-18-57_1116_hand.jpg", + "images/2025-01-13-11-18-57_1119_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Push\nB. Release\nC. Align\nD. Nudge\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Release" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1170_hand.jpg", + "images/2025-01-13-11-18-57_1173_hand.jpg", + "images/2025-01-13-11-18-57_1176_hand.jpg", + "images/2025-01-13-11-18-57_1179_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Grasp\nB. Twist\nC. Nudge\nD. Push\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Grasp" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1249_hand.jpg", + "images/2025-01-13-11-18-57_1252_hand.jpg", + "images/2025-01-13-11-18-57_1255_hand.jpg", + "images/2025-01-13-11-18-57_1258_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Pull\nB. Grasp\nC. Twist\nD. Push\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Pull" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1346_hand.jpg", + "images/2025-01-13-11-18-57_1349_hand.jpg", + "images/2025-01-13-11-18-57_1352_hand.jpg", + "images/2025-01-13-11-18-57_1355_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Lift\nB. Approach\nC. Nudge\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Approach" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1424_hand.jpg", + "images/2025-01-13-11-18-57_1427_hand.jpg", + "images/2025-01-13-11-18-57_1430_hand.jpg", + "images/2025-01-13-11-18-57_1433_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Push\nB. Twist\nC. Grasp\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Release" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1482_hand.jpg", + "images/2025-01-13-11-18-57_1485_hand.jpg", + "images/2025-01-13-11-18-57_1488_hand.jpg", + "images/2025-01-13-11-18-57_1491_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Grasp\nB. Approach\nC. Release\nD. Nudge\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Grasp" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1565_hand.jpg", + "images/2025-01-13-11-18-57_1568_hand.jpg", + "images/2025-01-13-11-18-57_1571_hand.jpg", + "images/2025-01-13-11-18-57_1574_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Twist\nB. Pull\nC. Release\nD. Grasp\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Pull" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1694_hand.jpg", + "images/2025-01-13-11-18-57_1697_hand.jpg", + "images/2025-01-13-11-18-57_1700_hand.jpg", + "images/2025-01-13-11-18-57_1703_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Push\nB. Approach\nC. Nudge\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Approach" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1798_hand.jpg", + "images/2025-01-13-11-18-57_1801_hand.jpg", + "images/2025-01-13-11-18-57_1804_hand.jpg", + "images/2025-01-13-11-18-57_1807_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Push\nB. Grasp\nC. Release\nD. Nudge\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Release" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1858_hand.jpg", + "images/2025-01-13-11-18-57_1861_hand.jpg", + "images/2025-01-13-11-18-57_1864_hand.jpg", + "images/2025-01-13-11-18-57_1867_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Twist\nB. Nudge\nC. Align\nD. Grasp\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Grasp" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1966_hand.jpg", + "images/2025-01-13-11-18-57_1969_hand.jpg", + "images/2025-01-13-11-18-57_1972_hand.jpg", + "images/2025-01-13-11-18-57_1975_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Push\nB. Pull\nC. Twist\nD. Approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Twist" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2169_hand.jpg", + "images/2025-01-13-11-18-57_2172_hand.jpg", + "images/2025-01-13-11-18-57_2175_hand.jpg", + "images/2025-01-13-11-18-57_2178_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Align\nB. Release\nC. Pull\nD. Nudge\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Pull" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2282_hand.jpg", + "images/2025-01-13-11-18-57_2285_hand.jpg", + "images/2025-01-13-11-18-57_2288_hand.jpg", + "images/2025-01-13-11-18-57_2291_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Pull\nB. Approach\nC. Twist\nD. Lift\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Approach" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2356_hand.jpg", + "images/2025-01-13-11-18-57_2359_hand.jpg", + "images/2025-01-13-11-18-57_2362_hand.jpg", + "images/2025-01-13-11-18-57_2365_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Pull\nB. Twist\nC. Lift\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Release" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2426_hand.jpg", + "images/2025-01-13-11-18-57_2429_hand.jpg", + "images/2025-01-13-11-18-57_2432_hand.jpg", + "images/2025-01-13-11-18-57_2435_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Twist\nB. Push\nC. Release\nD. Grasp\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Grasp" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2536_hand.jpg", + "images/2025-01-13-11-18-57_2539_hand.jpg", + "images/2025-01-13-11-18-57_2542_hand.jpg", + "images/2025-01-13-11-18-57_2545_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Align\nB. Push\nC. Pull\nD. Approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Pull" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2665_hand.jpg", + "images/2025-01-13-11-18-57_2668_hand.jpg", + "images/2025-01-13-11-18-57_2671_hand.jpg", + "images/2025-01-13-11-18-57_2674_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Release\nB. Grasp\nC. Align\nD. Approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Approach" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2744_hand.jpg", + "images/2025-01-13-11-18-57_2747_hand.jpg", + "images/2025-01-13-11-18-57_2750_hand.jpg", + "images/2025-01-13-11-18-57_2753_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Twist\nB. Pull\nC. Release\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Release" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2802_hand.jpg", + "images/2025-01-13-11-18-57_2805_hand.jpg", + "images/2025-01-13-11-18-57_2808_hand.jpg", + "images/2025-01-13-11-18-57_2811_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Pull\nB. Grasp\nC. Lift\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Grasp" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2899_hand.jpg", + "images/2025-01-13-11-18-57_2902_hand.jpg", + "images/2025-01-13-11-18-57_2905_hand.jpg", + "images/2025-01-13-11-18-57_2908_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Approach\nB. Lift\nC. Pull\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Pull" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3040_hand.jpg", + "images/2025-01-13-11-18-57_3043_hand.jpg", + "images/2025-01-13-11-18-57_3046_hand.jpg", + "images/2025-01-13-11-18-57_3049_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Twist\nB. Grasp\nC. Lift\nD. Approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Approach" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3151_hand.jpg", + "images/2025-01-13-11-18-57_3154_hand.jpg", + "images/2025-01-13-11-18-57_3157_hand.jpg", + "images/2025-01-13-11-18-57_3160_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Approach\nB. Pull\nC. Align\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Release" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3216_hand.jpg", + "images/2025-01-13-11-18-57_3219_hand.jpg", + "images/2025-01-13-11-18-57_3222_hand.jpg", + "images/2025-01-13-11-18-57_3225_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Align\nB. Lift\nC. Release\nD. Grasp\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Grasp" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3344_hand.jpg", + "images/2025-01-13-11-18-57_3347_hand.jpg", + "images/2025-01-13-11-18-57_3350_hand.jpg", + "images/2025-01-13-11-18-57_3353_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Lift\nB. Twist\nC. Grasp\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Grasp" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3532_hand.jpg", + "images/2025-01-13-11-18-57_3535_hand.jpg", + "images/2025-01-13-11-18-57_3538_hand.jpg", + "images/2025-01-13-11-18-57_3541_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Twist\nB. Pull\nC. Push\nD. Nudge\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Pull" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3620_hand.jpg", + "images/2025-01-13-11-18-57_3623_hand.jpg", + "images/2025-01-13-11-18-57_3626_hand.jpg", + "images/2025-01-13-11-18-57_3629_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Push\nB. Pull\nC. Approach\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Approach" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3700_hand.jpg", + "images/2025-01-13-11-18-57_3703_hand.jpg", + "images/2025-01-13-11-18-57_3706_hand.jpg", + "images/2025-01-13-11-18-57_3709_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Release\nB. Push\nC. Lift\nD. Twist\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Release" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3771_hand.jpg", + "images/2025-01-13-11-18-57_3774_hand.jpg", + "images/2025-01-13-11-18-57_3777_hand.jpg", + "images/2025-01-13-11-18-57_3780_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Align\nB. Approach\nC. Twist\nD. Grasp\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Grasp" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3918_hand.jpg", + "images/2025-01-13-11-18-57_3921_hand.jpg", + "images/2025-01-13-11-18-57_3924_hand.jpg", + "images/2025-01-13-11-18-57_3927_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Pull\nB. Nudge\nC. Twist\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Pull" + } + ], + "images": [ + "images/2025-01-13-11-18-57_4072_hand.jpg", + "images/2025-01-13-11-18-57_4075_hand.jpg", + "images/2025-01-13-11-18-57_4078_hand.jpg", + "images/2025-01-13-11-18-57_4081_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Grasp\nB. Pull\nC. Twist\nD. Approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Approach" + } + ], + "images": [ + "images/2025-01-13-11-18-57_4146_hand.jpg", + "images/2025-01-13-11-18-57_4149_hand.jpg", + "images/2025-01-13-11-18-57_4152_hand.jpg", + "images/2025-01-13-11-18-57_4155_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Release\nB. Nudge\nC. Align\nD. Grasp\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Release" + } + ], + "images": [ + "images/2025-01-13-11-18-57_4229_hand.jpg", + "images/2025-01-13-11-18-57_4232_hand.jpg", + "images/2025-01-13-11-18-57_4235_hand.jpg", + "images/2025-01-13-11-18-57_4238_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Approach\nB. Grasp\nC. Release\nD. Lift\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Grasp" + } + ], + "images": [ + "images/2025-01-13-11-18-57_4410_hand.jpg", + "images/2025-01-13-11-18-57_4413_hand.jpg", + "images/2025-01-13-11-18-57_4416_hand.jpg", + "images/2025-01-13-11-18-57_4419_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Pull\nB. Align\nC. Push\nD. Twist\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Pull" + } + ], + "images": [ + "images/2025-01-13-11-18-57_4622_hand.jpg", + "images/2025-01-13-11-18-57_4625_hand.jpg", + "images/2025-01-13-11-18-57_4628_hand.jpg", + "images/2025-01-13-11-18-57_4631_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Approach\nB. Twist\nC. Pull\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Approach" + } + ], + "images": [ + "images/2025-01-13-11-18-57_4746_hand.jpg", + "images/2025-01-13-11-18-57_4749_hand.jpg", + "images/2025-01-13-11-18-57_4752_hand.jpg", + "images/2025-01-13-11-18-57_4755_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Align\nB. Approach\nC. Nudge\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Release" + } + ], + "images": [ + "images/2025-01-13-11-18-57_4830_hand.jpg", + "images/2025-01-13-11-18-57_4833_hand.jpg", + "images/2025-01-13-11-18-57_4836_hand.jpg", + "images/2025-01-13-11-18-57_4839_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Remove bolt 4', what low-level action should happen next?\nOptions:\nA. Twist\nB. Lift\nC. Align\nD. Nudge\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Twist" + } + ], + "images": [ + "images/2025-01-13-11-18-57_178_hand.jpg", + "images/2025-01-13-11-18-57_181_hand.jpg", + "images/2025-01-13-11-18-57_184_hand.jpg", + "images/2025-01-13-11-18-57_187_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Remove bolt 4', what low-level action should happen next?\nOptions:\nA. Twist\nB. Lift\nC. Approach\nD. Nudge\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Twist" + } + ], + "images": [ + "images/2025-01-13-11-18-57_563_hand.jpg", + "images/2025-01-13-11-18-57_566_hand.jpg", + "images/2025-01-13-11-18-57_569_hand.jpg", + "images/2025-01-13-11-18-57_572_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Remove bolt 4', what low-level action should happen next?\nOptions:\nA. Pull\nB. Nudge\nC. Approach\nD. Lift\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Pull" + } + ], + "images": [ + "images/2025-01-13-11-18-57_639_hand.jpg", + "images/2025-01-13-11-18-57_642_hand.jpg", + "images/2025-01-13-11-18-57_645_hand.jpg", + "images/2025-01-13-11-18-57_648_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Place bolt 4', what low-level action should happen next?\nOptions:\nA. Release\nB. Lift\nC. Pull\nD. Nudge\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Release" + } + ], + "images": [ + "images/2025-01-13-11-18-57_786_hand.jpg", + "images/2025-01-13-11-18-57_789_hand.jpg", + "images/2025-01-13-11-18-57_792_hand.jpg", + "images/2025-01-13-11-18-57_795_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Remove D-SUB', what low-level action should happen next?\nOptions:\nA. Approach\nB. Nudge\nC. Lift\nD. Pull\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Pull" + } + ], + "images": [ + "images/2025-01-13-11-18-57_977_hand.jpg", + "images/2025-01-13-11-18-57_980_hand.jpg", + "images/2025-01-13-11-18-57_983_hand.jpg", + "images/2025-01-13-11-18-57_986_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Place D-SUB', what low-level action should happen next?\nOptions:\nA. Align\nB. Release\nC. Nudge\nD. Pull\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Release" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1128_hand.jpg", + "images/2025-01-13-11-18-57_1131_hand.jpg", + "images/2025-01-13-11-18-57_1134_hand.jpg", + "images/2025-01-13-11-18-57_1137_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Remove round peg 4', what low-level action should happen next?\nOptions:\nA. Pull\nB. Approach\nC. Push\nD. Twist\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Pull" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1273_hand.jpg", + "images/2025-01-13-11-18-57_1276_hand.jpg", + "images/2025-01-13-11-18-57_1279_hand.jpg", + "images/2025-01-13-11-18-57_1282_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Place round peg 4', what low-level action should happen next?\nOptions:\nA. Nudge\nB. Release\nC. Align\nD. Pull\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Release" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1442_hand.jpg", + "images/2025-01-13-11-18-57_1445_hand.jpg", + "images/2025-01-13-11-18-57_1448_hand.jpg", + "images/2025-01-13-11-18-57_1451_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Remove square peg 1', what low-level action should happen next?\nOptions:\nA. Align\nB. Release\nC. Lift\nD. Pull\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Pull" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1592_hand.jpg", + "images/2025-01-13-11-18-57_1595_hand.jpg", + "images/2025-01-13-11-18-57_1598_hand.jpg", + "images/2025-01-13-11-18-57_1601_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Place square peg 1', what low-level action should happen next?\nOptions:\nA. Push\nB. Pull\nC. Twist\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Release" + } + ], + "images": [ + "images/2025-01-13-11-18-57_1816_hand.jpg", + "images/2025-01-13-11-18-57_1819_hand.jpg", + "images/2025-01-13-11-18-57_1822_hand.jpg", + "images/2025-01-13-11-18-57_1825_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Remove BNC', what low-level action should happen next?\nOptions:\nA. Release\nB. Align\nC. Lift\nD. Twist\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Twist" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2002_hand.jpg", + "images/2025-01-13-11-18-57_2005_hand.jpg", + "images/2025-01-13-11-18-57_2008_hand.jpg", + "images/2025-01-13-11-18-57_2011_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Remove BNC', what low-level action should happen next?\nOptions:\nA. Grasp\nB. Approach\nC. Pull\nD. Nudge\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Pull" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2204_hand.jpg", + "images/2025-01-13-11-18-57_2207_hand.jpg", + "images/2025-01-13-11-18-57_2210_hand.jpg", + "images/2025-01-13-11-18-57_2213_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Place BNC', what low-level action should happen next?\nOptions:\nA. Grasp\nB. Release\nC. Align\nD. Twist\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Release" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2377_hand.jpg", + "images/2025-01-13-11-18-57_2380_hand.jpg", + "images/2025-01-13-11-18-57_2383_hand.jpg", + "images/2025-01-13-11-18-57_2386_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Remove square peg 4', what low-level action should happen next?\nOptions:\nA. Align\nB. Approach\nC. Pull\nD. Lift\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Pull" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2571_hand.jpg", + "images/2025-01-13-11-18-57_2574_hand.jpg", + "images/2025-01-13-11-18-57_2577_hand.jpg", + "images/2025-01-13-11-18-57_2580_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Place square peg 4', what low-level action should happen next?\nOptions:\nA. Lift\nB. Pull\nC. Twist\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Release" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2760_hand.jpg", + "images/2025-01-13-11-18-57_2763_hand.jpg", + "images/2025-01-13-11-18-57_2766_hand.jpg", + "images/2025-01-13-11-18-57_2769_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Remove round peg 2', what low-level action should happen next?\nOptions:\nA. Pull\nB. Twist\nC. Nudge\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Pull" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2930_hand.jpg", + "images/2025-01-13-11-18-57_2933_hand.jpg", + "images/2025-01-13-11-18-57_2936_hand.jpg", + "images/2025-01-13-11-18-57_2939_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Place round peg 2', what low-level action should happen next?\nOptions:\nA. Lift\nB. Twist\nC. Release\nD. Nudge\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Release" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3170_hand.jpg", + "images/2025-01-13-11-18-57_3173_hand.jpg", + "images/2025-01-13-11-18-57_3176_hand.jpg", + "images/2025-01-13-11-18-57_3179_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Remove round peg 1', what low-level action should happen next?\nOptions:\nA. Pull\nB. Lift\nC. Nudge\nD. Twist\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Pull" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3555_hand.jpg", + "images/2025-01-13-11-18-57_3558_hand.jpg", + "images/2025-01-13-11-18-57_3561_hand.jpg", + "images/2025-01-13-11-18-57_3564_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Place round peg 1', what low-level action should happen next?\nOptions:\nA. Pull\nB. Grasp\nC. Push\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Release" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3720_hand.jpg", + "images/2025-01-13-11-18-57_3723_hand.jpg", + "images/2025-01-13-11-18-57_3726_hand.jpg", + "images/2025-01-13-11-18-57_3729_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Remove Ethernet', what low-level action should happen next?\nOptions:\nA. Twist\nB. Pull\nC. Lift\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Pull" + } + ], + "images": [ + "images/2025-01-13-11-18-57_3965_hand.jpg", + "images/2025-01-13-11-18-57_3968_hand.jpg", + "images/2025-01-13-11-18-57_3971_hand.jpg", + "images/2025-01-13-11-18-57_3974_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Place Ethernet', what low-level action should happen next?\nOptions:\nA. Push\nB. Grasp\nC. Lift\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Release" + } + ], + "images": [ + "images/2025-01-13-11-18-57_4165_hand.jpg", + "images/2025-01-13-11-18-57_4168_hand.jpg", + "images/2025-01-13-11-18-57_4171_hand.jpg", + "images/2025-01-13-11-18-57_4174_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Remove square peg 2', what low-level action should happen next?\nOptions:\nA. Twist\nB. Pull\nC. Align\nD. Nudge\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Pull" + } + ], + "images": [ + "images/2025-01-13-11-18-57_4463_hand.jpg", + "images/2025-01-13-11-18-57_4466_hand.jpg", + "images/2025-01-13-11-18-57_4469_hand.jpg", + "images/2025-01-13-11-18-57_4472_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Place square peg 2', what low-level action should happen next?\nOptions:\nA. Pull\nB. Nudge\nC. Grasp\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Release" + } + ], + "images": [ + "images/2025-01-13-11-18-57_4769_hand.jpg", + "images/2025-01-13-11-18-57_4772_hand.jpg", + "images/2025-01-13-11-18-57_4775_hand.jpg", + "images/2025-01-13-11-18-57_4778_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the high-level action 'Remove bolt 4', which action best fills [MASK] in this local chain: Grasp > [MASK] > Pull?\nOptions:\nA. Nudge\nB. Release\nC. Push\nD. Twist\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Twist" + } + ], + "images": [ + "images/2025-01-13-11-18-57_635_hand.jpg", + "images/2025-01-13-11-18-57_638_hand.jpg", + "images/2025-01-13-11-18-57_641_hand.jpg", + "images/2025-01-13-11-18-57_644_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T12", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the high-level action 'Remove BNC', which action best fills [MASK] in this local chain: Grasp > [MASK] > Pull?\nOptions:\nA. Grasp\nB. Pull\nC. Twist\nD. Approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Twist" + } + ], + "images": [ + "images/2025-01-13-11-18-57_2169_hand.jpg", + "images/2025-01-13-11-18-57_2172_hand.jpg", + "images/2025-01-13-11-18-57_2175_hand.jpg", + "images/2025-01-13-11-18-57_2178_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T12", + "task_id": "2025-01-13-11-18-57", + "split": "eval", + "split_group_id": "2025-01-13-11-18-57", + "recording_id": "2025-01-13-11-18-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. release\nC. hold and carry\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
pre-approach" + } + ], + "images": [ + "images/2025-01-13-16-27-57_25_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. contact\nC. pre-approach\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. contact" + } + ], + "images": [ + "images/2025-01-13-16-27-57_133_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. transfer\nC. contact\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. contact" + } + ], + "images": [ + "images/2025-01-13-16-27-57_228_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. hold and carry\nC. transfer\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
transfer" + } + ], + "images": [ + "images/2025-01-13-16-27-57_271_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. pre-approach\nC. approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. release" + } + ], + "images": [ + "images/2025-01-13-16-27-57_309_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. hold and carry\nC. approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. contact" + } + ], + "images": [ + "images/2025-01-13-16-27-57_387_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. contact\nC. hold and carry\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
contact" + } + ], + "images": [ + "images/2025-01-13-16-27-57_482_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. pre-approach\nC. transfer\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. transfer" + } + ], + "images": [ + "images/2025-01-13-16-27-57_540_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. approach\nC. contact\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. release" + } + ], + "images": [ + "images/2025-01-13-16-27-57_581_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. pre-approach\nC. contact\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
contact" + } + ], + "images": [ + "images/2025-01-13-16-27-57_676_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. pre-approach\nC. approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. contact" + } + ], + "images": [ + "images/2025-01-13-16-27-57_786_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. pre-approach\nC. hold and carry\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. transfer" + } + ], + "images": [ + "images/2025-01-13-16-27-57_859_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. hold and carry\nC. contact\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
release" + } + ], + "images": [ + "images/2025-01-13-16-27-57_915_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. contact\nC. transfer\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. contact" + } + ], + "images": [ + "images/2025-01-13-16-27-57_995_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. hold and carry\nC. approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. contact" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1083_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. transfer\nC. approach\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
transfer" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1128_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. release\nC. hold and carry\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. release" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1163_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. contact\nC. approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. contact" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1222_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. contact\nC. transfer\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
contact" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1303_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. transfer\nC. pre-approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. transfer" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1383_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. release\nC. hold and carry\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. release" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1441_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. release\nC. contact\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
contact" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1508_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. release\nC. approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. contact" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1578_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. transfer\nC. release\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. transfer" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1620_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. contact\nC. release\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
release" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1658_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. hold and carry\nC. transfer\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. contact" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1720_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. hold and carry\nC. contact\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. contact" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1860_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. transfer\nC. approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
contact" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2031_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. pre-approach\nC. transfer\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. contact" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2149_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. contact\nC. transfer\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. transfer" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2216_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. transfer\nC. approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
release" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2261_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. release\nC. pre-approach\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. contact" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2368_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. contact\nC. transfer\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. contact" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2503_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. hold and carry\nC. approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
transfer" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2579_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. hold and carry\nC. pre-approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. release" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2623_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. approach\nC. pre-approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. contact" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2717_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. approach\nC. release\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
contact" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2812_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. release\nC. transfer\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. transfer" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2865_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. release\nC. pre-approach\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. release" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2921_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. approach\nC. contact\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
contact" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3023_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. approach\nC. pre-approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. contact" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3132_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. transfer\nC. contact\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. transfer" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3213_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. contact\nC. release\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
release" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3284_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. approach\nC. contact\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. contact" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3370_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. release\nC. contact\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. contact" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3466_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. approach\nC. pre-approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
transfer" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3532_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. pre-approach\nC. transfer\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. release" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3585_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. transfer\nC. hold and carry\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. contact" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3668_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. approach\nC. contact\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
contact" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3820_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. release\nC. pre-approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. transfer" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3961_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. release\nC. hold and carry\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. release" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4034_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. approach\nC. contact\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
contact" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4146_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. contact\nC. approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. contact" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4335_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. contact\nC. hold and carry\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. contact" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4480_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. contact\nC. transfer\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
contact" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4553_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. pre-approach\nC. contact\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. contact" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4579_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. contact\nC. release\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. transfer" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4644_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. hold and carry\nC. approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
release" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4702_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. approach\nC. contact\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. contact" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4800_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. release\nC. pre-approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. contact" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4907_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. release\nC. transfer\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
transfer" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4957_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. release\nC. pre-approach\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. release" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4998_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. pre-approach\nC. transfer\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. contact" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5077_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. approach\nC. hold and carry\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
contact" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5164_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. approach\nC. transfer\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. transfer" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5220_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. transfer\nC. release\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. release" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5268_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. release\nC. approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
contact" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5357_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. contact\nC. transfer\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. contact" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5463_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. transfer\nC. hold and carry\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. transfer" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5548_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. hold and carry\nC. approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
release" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5617_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. hold and carry\nC. contact\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. contact" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5776_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. transfer\nC. hold and carry\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. contact" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5941_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. contact\nC. transfer\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
transfer" + } + ], + "images": [ + "images/2025-01-13-16-27-57_6009_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. release\nC. contact\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. release" + } + ], + "images": [ + "images/2025-01-13-16-27-57_6080_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. pre-approach\nC. release\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. pre-approach" + } + ], + "images": [ + "images/2025-01-13-16-27-57_6123_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — no contact" + } + ], + "images": [ + "images/2025-01-13-16-27-57_25_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-16-27-57_133_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-16-27-57_228_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — no contact" + } + ], + "images": [ + "images/2025-01-13-16-27-57_271_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-16-27-57_309_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-16-27-57_387_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-16-27-57_482_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/2025-01-13-16-27-57_540_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-16-27-57_581_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-16-27-57_676_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-16-27-57_786_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/2025-01-13-16-27-57_859_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-16-27-57_915_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-16-27-57_995_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1083_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — no contact" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1128_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1163_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1222_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1303_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1383_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1441_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1508_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1578_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1620_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1658_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1720_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1860_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2031_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2149_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2216_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2261_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2368_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2503_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — no contact" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2579_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2623_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2717_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2812_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2865_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2921_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3023_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3132_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3213_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3284_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3370_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3466_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — no contact" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3532_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3585_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3668_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3820_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3961_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4034_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4146_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4335_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4480_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4553_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4579_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4644_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4702_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4800_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4907_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — no contact" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4957_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4998_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5077_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5164_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5220_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5268_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5357_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5463_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5548_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5617_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5776_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5941_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — no contact" + } + ], + "images": [ + "images/2025-01-13-16-27-57_6009_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-16-27-57_6080_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/2025-01-13-16-27-57_6123_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. 
Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_10_hand.jpg", + "images/2025-01-13-16-27-57_18_hand.jpg", + "images/2025-01-13-16-27-57_26_hand.jpg", + "images/2025-01-13-16-27-57_33_hand.jpg", + "images/2025-01-13-16-27-57_41_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_220_hand.jpg", + "images/2025-01-13-16-27-57_224_hand.jpg", + "images/2025-01-13-16-27-57_228_hand.jpg", + "images/2025-01-13-16-27-57_233_hand.jpg", + "images/2025-01-13-16-27-57_237_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_253_hand.jpg", + "images/2025-01-13-16-27-57_262_hand.jpg", + "images/2025-01-13-16-27-57_271_hand.jpg", + "images/2025-01-13-16-27-57_280_hand.jpg", + "images/2025-01-13-16-27-57_289_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_577_hand.jpg", + "images/2025-01-13-16-27-57_579_hand.jpg", + "images/2025-01-13-16-27-57_581_hand.jpg", + "images/2025-01-13-16-27-57_583_hand.jpg", + "images/2025-01-13-16-27-57_585_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_829_hand.jpg", + "images/2025-01-13-16-27-57_844_hand.jpg", + "images/2025-01-13-16-27-57_860_hand.jpg", + "images/2025-01-13-16-27-57_875_hand.jpg", + "images/2025-01-13-16-27-57_890_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_950_hand.jpg", + "images/2025-01-13-16-27-57_972_hand.jpg", + "images/2025-01-13-16-27-57_995_hand.jpg", + "images/2025-01-13-16-27-57_1018_hand.jpg", + "images/2025-01-13-16-27-57_1040_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1111_hand.jpg", + "images/2025-01-13-16-27-57_1120_hand.jpg", + "images/2025-01-13-16-27-57_1128_hand.jpg", + "images/2025-01-13-16-27-57_1137_hand.jpg", + "images/2025-01-13-16-27-57_1146_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1160_hand.jpg", + "images/2025-01-13-16-27-57_1161_hand.jpg", + "images/2025-01-13-16-27-57_1163_hand.jpg", + "images/2025-01-13-16-27-57_1165_hand.jpg", + "images/2025-01-13-16-27-57_1166_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1353_hand.jpg", + "images/2025-01-13-16-27-57_1368_hand.jpg", + "images/2025-01-13-16-27-57_1383_hand.jpg", + "images/2025-01-13-16-27-57_1398_hand.jpg", + "images/2025-01-13-16-27-57_1413_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1472_hand.jpg", + "images/2025-01-13-16-27-57_1490_hand.jpg", + "images/2025-01-13-16-27-57_1508_hand.jpg", + "images/2025-01-13-16-27-57_1527_hand.jpg", + "images/2025-01-13-16-27-57_1545_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1601_hand.jpg", + "images/2025-01-13-16-27-57_1611_hand.jpg", + "images/2025-01-13-16-27-57_1620_hand.jpg", + "images/2025-01-13-16-27-57_1630_hand.jpg", + "images/2025-01-13-16-27-57_1640_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1655_hand.jpg", + "images/2025-01-13-16-27-57_1657_hand.jpg", + "images/2025-01-13-16-27-57_1658_hand.jpg", + "images/2025-01-13-16-27-57_1660_hand.jpg", + "images/2025-01-13-16-27-57_1662_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1687_hand.jpg", + "images/2025-01-13-16-27-57_1704_hand.jpg", + "images/2025-01-13-16-27-57_1720_hand.jpg", + "images/2025-01-13-16-27-57_1737_hand.jpg", + "images/2025-01-13-16-27-57_1754_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1807_hand.jpg", + "images/2025-01-13-16-27-57_1833_hand.jpg", + "images/2025-01-13-16-27-57_1860_hand.jpg", + "images/2025-01-13-16-27-57_1887_hand.jpg", + "images/2025-01-13-16-27-57_1913_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2129_hand.jpg", + "images/2025-01-13-16-27-57_2139_hand.jpg", + "images/2025-01-13-16-27-57_2149_hand.jpg", + "images/2025-01-13-16-27-57_2159_hand.jpg", + "images/2025-01-13-16-27-57_2169_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2194_hand.jpg", + "images/2025-01-13-16-27-57_2205_hand.jpg", + "images/2025-01-13-16-27-57_2216_hand.jpg", + "images/2025-01-13-16-27-57_2227_hand.jpg", + "images/2025-01-13-16-27-57_2238_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2255_hand.jpg", + "images/2025-01-13-16-27-57_2258_hand.jpg", + "images/2025-01-13-16-27-57_2261_hand.jpg", + "images/2025-01-13-16-27-57_2264_hand.jpg", + "images/2025-01-13-16-27-57_2267_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2479_hand.jpg", + "images/2025-01-13-16-27-57_2491_hand.jpg", + "images/2025-01-13-16-27-57_2503_hand.jpg", + "images/2025-01-13-16-27-57_2515_hand.jpg", + "images/2025-01-13-16-27-57_2527_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2557_hand.jpg", + "images/2025-01-13-16-27-57_2568_hand.jpg", + "images/2025-01-13-16-27-57_2579_hand.jpg", + "images/2025-01-13-16-27-57_2590_hand.jpg", + "images/2025-01-13-16-27-57_2601_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2619_hand.jpg", + "images/2025-01-13-16-27-57_2621_hand.jpg", + "images/2025-01-13-16-27-57_2623_hand.jpg", + "images/2025-01-13-16-27-57_2625_hand.jpg", + "images/2025-01-13-16-27-57_2627_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2663_hand.jpg", + "images/2025-01-13-16-27-57_2690_hand.jpg", + "images/2025-01-13-16-27-57_2718_hand.jpg", + "images/2025-01-13-16-27-57_2745_hand.jpg", + "images/2025-01-13-16-27-57_2772_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2837_hand.jpg", + "images/2025-01-13-16-27-57_2851_hand.jpg", + "images/2025-01-13-16-27-57_2865_hand.jpg", + "images/2025-01-13-16-27-57_2879_hand.jpg", + "images/2025-01-13-16-27-57_2893_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2966_hand.jpg", + "images/2025-01-13-16-27-57_2995_hand.jpg", + "images/2025-01-13-16-27-57_3023_hand.jpg", + "images/2025-01-13-16-27-57_3051_hand.jpg", + "images/2025-01-13-16-27-57_3080_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3174_hand.jpg", + "images/2025-01-13-16-27-57_3194_hand.jpg", + "images/2025-01-13-16-27-57_3214_hand.jpg", + "images/2025-01-13-16-27-57_3233_hand.jpg", + "images/2025-01-13-16-27-57_3253_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3322_hand.jpg", + "images/2025-01-13-16-27-57_3346_hand.jpg", + "images/2025-01-13-16-27-57_3370_hand.jpg", + "images/2025-01-13-16-27-57_3395_hand.jpg", + "images/2025-01-13-16-27-57_3419_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3503_hand.jpg", + "images/2025-01-13-16-27-57_3517_hand.jpg", + "images/2025-01-13-16-27-57_3532_hand.jpg", + "images/2025-01-13-16-27-57_3547_hand.jpg", + "images/2025-01-13-16-27-57_3561_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3620_hand.jpg", + "images/2025-01-13-16-27-57_3644_hand.jpg", + "images/2025-01-13-16-27-57_3668_hand.jpg", + "images/2025-01-13-16-27-57_3692_hand.jpg", + "images/2025-01-13-16-27-57_3716_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3773_hand.jpg", + "images/2025-01-13-16-27-57_3797_hand.jpg", + "images/2025-01-13-16-27-57_3820_hand.jpg", + "images/2025-01-13-16-27-57_3843_hand.jpg", + "images/2025-01-13-16-27-57_3867_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3921_hand.jpg", + "images/2025-01-13-16-27-57_3941_hand.jpg", + "images/2025-01-13-16-27-57_3962_hand.jpg", + "images/2025-01-13-16-27-57_3982_hand.jpg", + "images/2025-01-13-16-27-57_4002_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4031_hand.jpg", + "images/2025-01-13-16-27-57_4033_hand.jpg", + "images/2025-01-13-16-27-57_4034_hand.jpg", + "images/2025-01-13-16-27-57_4036_hand.jpg", + "images/2025-01-13-16-27-57_4038_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4080_hand.jpg", + "images/2025-01-13-16-27-57_4113_hand.jpg", + "images/2025-01-13-16-27-57_4146_hand.jpg", + "images/2025-01-13-16-27-57_4179_hand.jpg", + "images/2025-01-13-16-27-57_4212_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4282_hand.jpg", + "images/2025-01-13-16-27-57_4308_hand.jpg", + "images/2025-01-13-16-27-57_4335_hand.jpg", + "images/2025-01-13-16-27-57_4362_hand.jpg", + "images/2025-01-13-16-27-57_4388_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4546_hand.jpg", + "images/2025-01-13-16-27-57_4550_hand.jpg", + "images/2025-01-13-16-27-57_4554_hand.jpg", + "images/2025-01-13-16-27-57_4557_hand.jpg", + "images/2025-01-13-16-27-57_4561_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4613_hand.jpg", + "images/2025-01-13-16-27-57_4629_hand.jpg", + "images/2025-01-13-16-27-57_4644_hand.jpg", + "images/2025-01-13-16-27-57_4660_hand.jpg", + "images/2025-01-13-16-27-57_4676_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4698_hand.jpg", + "images/2025-01-13-16-27-57_4700_hand.jpg", + "images/2025-01-13-16-27-57_4702_hand.jpg", + "images/2025-01-13-16-27-57_4704_hand.jpg", + "images/2025-01-13-16-27-57_4706_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4937_hand.jpg", + "images/2025-01-13-16-27-57_4947_hand.jpg", + "images/2025-01-13-16-27-57_4958_hand.jpg", + "images/2025-01-13-16-27-57_4968_hand.jpg", + "images/2025-01-13-16-27-57_4978_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5196_hand.jpg", + "images/2025-01-13-16-27-57_5208_hand.jpg", + "images/2025-01-13-16-27-57_5220_hand.jpg", + "images/2025-01-13-16-27-57_5233_hand.jpg", + "images/2025-01-13-16-27-57_5245_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5264_hand.jpg", + "images/2025-01-13-16-27-57_5266_hand.jpg", + "images/2025-01-13-16-27-57_5268_hand.jpg", + "images/2025-01-13-16-27-57_5270_hand.jpg", + "images/2025-01-13-16-27-57_5272_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5307_hand.jpg", + "images/2025-01-13-16-27-57_5332_hand.jpg", + "images/2025-01-13-16-27-57_5358_hand.jpg", + "images/2025-01-13-16-27-57_5383_hand.jpg", + "images/2025-01-13-16-27-57_5408_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5510_hand.jpg", + "images/2025-01-13-16-27-57_5529_hand.jpg", + "images/2025-01-13-16-27-57_5548_hand.jpg", + "images/2025-01-13-16-27-57_5567_hand.jpg", + "images/2025-01-13-16-27-57_5586_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5612_hand.jpg", + "images/2025-01-13-16-27-57_5615_hand.jpg", + "images/2025-01-13-16-27-57_5617_hand.jpg", + "images/2025-01-13-16-27-57_5619_hand.jpg", + "images/2025-01-13-16-27-57_5622_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5975_hand.jpg", + "images/2025-01-13-16-27-57_5992_hand.jpg", + "images/2025-01-13-16-27-57_6009_hand.jpg", + "images/2025-01-13-16-27-57_6026_hand.jpg", + "images/2025-01-13-16-27-57_6043_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_6070_hand.jpg", + "images/2025-01-13-16-27-57_6075_hand.jpg", + "images/2025-01-13-16-27-57_6080_hand.jpg", + "images/2025-01-13-16-27-57_6085_hand.jpg", + "images/2025-01-13-16-27-57_6090_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_6108_hand.jpg", + "images/2025-01-13-16-27-57_6115_hand.jpg", + "images/2025-01-13-16-27-57_6123_hand.jpg", + "images/2025-01-13-16-27-57_6131_hand.jpg", + "images/2025-01-13-16-27-57_6138_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-16-27-57_75_hand.jpg", + "images/2025-01-13-16-27-57_89_hand.jpg", + "images/2025-01-13-16-27-57_103_hand.jpg", + "images/2025-01-13-16-27-57_118_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-16-27-57_251_hand.jpg", + "images/2025-01-13-16-27-57_257_hand.jpg", + "images/2025-01-13-16-27-57_262_hand.jpg", + "images/2025-01-13-16-27-57_268_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-16-27-57_342_hand.jpg", + "images/2025-01-13-16-27-57_356_hand.jpg", + "images/2025-01-13-16-27-57_370_hand.jpg", + "images/2025-01-13-16-27-57_384_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-16-27-57_517_hand.jpg", + "images/2025-01-13-16-27-57_523_hand.jpg", + "images/2025-01-13-16-27-57_529_hand.jpg", + "images/2025-01-13-16-27-57_535_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-16-27-57_617_hand.jpg", + "images/2025-01-13-16-27-57_633_hand.jpg", + "images/2025-01-13-16-27-57_650_hand.jpg", + "images/2025-01-13-16-27-57_666_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-16-27-57_824_hand.jpg", + "images/2025-01-13-16-27-57_833_hand.jpg", + "images/2025-01-13-16-27-57_841_hand.jpg", + "images/2025-01-13-16-27-57_849_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-16-27-57_945_hand.jpg", + "images/2025-01-13-16-27-57_958_hand.jpg", + "images/2025-01-13-16-27-57_971_hand.jpg", + "images/2025-01-13-16-27-57_984_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1109_hand.jpg", + "images/2025-01-13-16-27-57_1115_hand.jpg", + "images/2025-01-13-16-27-57_1120_hand.jpg", + "images/2025-01-13-16-27-57_1125_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1191_hand.jpg", + "images/2025-01-13-16-27-57_1203_hand.jpg", + "images/2025-01-13-16-27-57_1216_hand.jpg", + "images/2025-01-13-16-27-57_1228_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1349_hand.jpg", + "images/2025-01-13-16-27-57_1358_hand.jpg", + "images/2025-01-13-16-27-57_1366_hand.jpg", + "images/2025-01-13-16-27-57_1375_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1468_hand.jpg", + "images/2025-01-13-16-27-57_1478_hand.jpg", + "images/2025-01-13-16-27-57_1489_hand.jpg", + "images/2025-01-13-16-27-57_1499_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1599_hand.jpg", + "images/2025-01-13-16-27-57_1605_hand.jpg", + "images/2025-01-13-16-27-57_1610_hand.jpg", + "images/2025-01-13-16-27-57_1616_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — this operation will fail" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1700_hand.jpg", + "images/2025-01-13-16-27-57_1721_hand.jpg", + "images/2025-01-13-16-27-57_1742_hand.jpg", + "images/2025-01-13-16-27-57_1763_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1975_hand.jpg", + "images/2025-01-13-16-27-57_1993_hand.jpg", + "images/2025-01-13-16-27-57_2010_hand.jpg", + "images/2025-01-13-16-27-57_2028_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2192_hand.jpg", + "images/2025-01-13-16-27-57_2199_hand.jpg", + "images/2025-01-13-16-27-57_2206_hand.jpg", + "images/2025-01-13-16-27-57_2212_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2305_hand.jpg", + "images/2025-01-13-16-27-57_2326_hand.jpg", + "images/2025-01-13-16-27-57_2346_hand.jpg", + "images/2025-01-13-16-27-57_2366_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2554_hand.jpg", + "images/2025-01-13-16-27-57_2561_hand.jpg", + "images/2025-01-13-16-27-57_2567_hand.jpg", + "images/2025-01-13-16-27-57_2574_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2655_hand.jpg", + "images/2025-01-13-16-27-57_2669_hand.jpg", + "images/2025-01-13-16-27-57_2683_hand.jpg", + "images/2025-01-13-16-27-57_2697_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2834_hand.jpg", + "images/2025-01-13-16-27-57_2843_hand.jpg", + "images/2025-01-13-16-27-57_2851_hand.jpg", + "images/2025-01-13-16-27-57_2860_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2960_hand.jpg", + "images/2025-01-13-16-27-57_2977_hand.jpg", + "images/2025-01-13-16-27-57_2993_hand.jpg", + "images/2025-01-13-16-27-57_3009_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3168_hand.jpg", + "images/2025-01-13-16-27-57_3179_hand.jpg", + "images/2025-01-13-16-27-57_3190_hand.jpg", + "images/2025-01-13-16-27-57_3200_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3317_hand.jpg", + "images/2025-01-13-16-27-57_3331_hand.jpg", + "images/2025-01-13-16-27-57_3346_hand.jpg", + "images/2025-01-13-16-27-57_3360_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3498_hand.jpg", + "images/2025-01-13-16-27-57_3506_hand.jpg", + "images/2025-01-13-16-27-57_3514_hand.jpg", + "images/2025-01-13-16-27-57_3522_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3630_hand.jpg", + "images/2025-01-13-16-27-57_3652_hand.jpg", + "images/2025-01-13-16-27-57_3675_hand.jpg", + "images/2025-01-13-16-27-57_3698_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3914_hand.jpg", + "images/2025-01-13-16-27-57_3925_hand.jpg", + "images/2025-01-13-16-27-57_3936_hand.jpg", + "images/2025-01-13-16-27-57_3947_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this operation will fail" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4088_hand.jpg", + "images/2025-01-13-16-27-57_4117_hand.jpg", + "images/2025-01-13-16-27-57_4145_hand.jpg", + "images/2025-01-13-16-27-57_4174_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4441_hand.jpg", + "images/2025-01-13-16-27-57_4454_hand.jpg", + "images/2025-01-13-16-27-57_4467_hand.jpg", + "images/2025-01-13-16-27-57_4480_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4609_hand.jpg", + "images/2025-01-13-16-27-57_4617_hand.jpg", + "images/2025-01-13-16-27-57_4626_hand.jpg", + "images/2025-01-13-16-27-57_4635_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4736_hand.jpg", + "images/2025-01-13-16-27-57_4752_hand.jpg", + "images/2025-01-13-16-27-57_4769_hand.jpg", + "images/2025-01-13-16-27-57_4785_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4935_hand.jpg", + "images/2025-01-13-16-27-57_4941_hand.jpg", + "images/2025-01-13-16-27-57_4947_hand.jpg", + "images/2025-01-13-16-27-57_4953_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5027_hand.jpg", + "images/2025-01-13-16-27-57_5041_hand.jpg", + "images/2025-01-13-16-27-57_5054_hand.jpg", + "images/2025-01-13-16-27-57_5067_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5193_hand.jpg", + "images/2025-01-13-16-27-57_5200_hand.jpg", + "images/2025-01-13-16-27-57_5207_hand.jpg", + "images/2025-01-13-16-27-57_5214_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5303_hand.jpg", + "images/2025-01-13-16-27-57_5319_hand.jpg", + "images/2025-01-13-16-27-57_5334_hand.jpg", + "images/2025-01-13-16-27-57_5350_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5505_hand.jpg", + "images/2025-01-13-16-27-57_5515_hand.jpg", + "images/2025-01-13-16-27-57_5526_hand.jpg", + "images/2025-01-13-16-27-57_5536_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5667_hand.jpg", + "images/2025-01-13-16-27-57_5691_hand.jpg", + "images/2025-01-13-16-27-57_5716_hand.jpg", + "images/2025-01-13-16-27-57_5741_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5973_hand.jpg", + "images/2025-01-13-16-27-57_5984_hand.jpg", + "images/2025-01-13-16-27-57_5995_hand.jpg", + "images/2025-01-13-16-27-57_6005_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/2025-01-13-16-27-57_279_hand.jpg", + "images/2025-01-13-16-27-57_412_hand.jpg", + "images/2025-01-13-16-27-57_146_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/2025-01-13-16-27-57_547_hand.jpg", + "images/2025-01-13-16-27-57_412_hand.jpg", + "images/2025-01-13-16-27-57_279_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/2025-01-13-16-27-57_547_hand.jpg", + "images/2025-01-13-16-27-57_699_hand.jpg", + "images/2025-01-13-16-27-57_412_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/2025-01-13-16-27-57_866_hand.jpg", + "images/2025-01-13-16-27-57_699_hand.jpg", + "images/2025-01-13-16-27-57_547_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/2025-01-13-16-27-57_699_hand.jpg", + "images/2025-01-13-16-27-57_1011_hand.jpg", + "images/2025-01-13-16-27-57_866_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/2025-01-13-16-27-57_866_hand.jpg", + "images/2025-01-13-16-27-57_1135_hand.jpg", + "images/2025-01-13-16-27-57_1011_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1011_hand.jpg", + "images/2025-01-13-16-27-57_1252_hand.jpg", + "images/2025-01-13-16-27-57_1135_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1392_hand.jpg", + "images/2025-01-13-16-27-57_1252_hand.jpg", + "images/2025-01-13-16-27-57_1135_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1520_hand.jpg", + "images/2025-01-13-16-27-57_1392_hand.jpg", + "images/2025-01-13-16-27-57_1252_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1520_hand.jpg", + "images/2025-01-13-16-27-57_1392_hand.jpg", + "images/2025-01-13-16-27-57_1627_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1520_hand.jpg", + "images/2025-01-13-16-27-57_1627_hand.jpg", + "images/2025-01-13-16-27-57_2063_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1627_hand.jpg", + "images/2025-01-13-16-27-57_2063_hand.jpg", + "images/2025-01-13-16-27-57_2225_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2406_hand.jpg", + "images/2025-01-13-16-27-57_2225_hand.jpg", + "images/2025-01-13-16-27-57_2063_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2406_hand.jpg", + "images/2025-01-13-16-27-57_2225_hand.jpg", + "images/2025-01-13-16-27-57_2586_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2406_hand.jpg", + "images/2025-01-13-16-27-57_2725_hand.jpg", + "images/2025-01-13-16-27-57_2586_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2876_hand.jpg", + "images/2025-01-13-16-27-57_2586_hand.jpg", + "images/2025-01-13-16-27-57_2725_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3041_hand.jpg", + "images/2025-01-13-16-27-57_2876_hand.jpg", + "images/2025-01-13-16-27-57_2725_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2876_hand.jpg", + "images/2025-01-13-16-27-57_3221_hand.jpg", + "images/2025-01-13-16-27-57_3041_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3041_hand.jpg", + "images/2025-01-13-16-27-57_3388_hand.jpg", + "images/2025-01-13-16-27-57_3221_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3388_hand.jpg", + "images/2025-01-13-16-27-57_3221_hand.jpg", + "images/2025-01-13-16-27-57_3538_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3388_hand.jpg", + "images/2025-01-13-16-27-57_3743_hand.jpg", + "images/2025-01-13-16-27-57_3538_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3538_hand.jpg", + "images/2025-01-13-16-27-57_3743_hand.jpg", + "images/2025-01-13-16-27-57_3968_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3743_hand.jpg", + "images/2025-01-13-16-27-57_3968_hand.jpg", + "images/2025-01-13-16-27-57_4506_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4506_hand.jpg", + "images/2025-01-13-16-27-57_4652_hand.jpg", + "images/2025-01-13-16-27-57_3968_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4652_hand.jpg", + "images/2025-01-13-16-27-57_4817_hand.jpg", + "images/2025-01-13-16-27-57_4506_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4652_hand.jpg", + "images/2025-01-13-16-27-57_4817_hand.jpg", + "images/2025-01-13-16-27-57_4965_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4817_hand.jpg", + "images/2025-01-13-16-27-57_5093_hand.jpg", + "images/2025-01-13-16-27-57_4965_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5228_hand.jpg", + "images/2025-01-13-16-27-57_4965_hand.jpg", + "images/2025-01-13-16-27-57_5093_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5093_hand.jpg", + "images/2025-01-13-16-27-57_5382_hand.jpg", + "images/2025-01-13-16-27-57_5228_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5556_hand.jpg", + "images/2025-01-13-16-27-57_5382_hand.jpg", + "images/2025-01-13-16-27-57_5228_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5790_hand.jpg", + "images/2025-01-13-16-27-57_5382_hand.jpg", + "images/2025-01-13-16-27-57_5556_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5556_hand.jpg", + "images/2025-01-13-16-27-57_5790_hand.jpg", + "images/2025-01-13-16-27-57_6026_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-16-27-57_146_hand.jpg", + "images/2025-01-13-16-27-57_279_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-16-27-57_279_hand.jpg", + "images/2025-01-13-16-27-57_412_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-16-27-57_412_hand.jpg", + "images/2025-01-13-16-27-57_547_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-16-27-57_547_hand.jpg", + "images/2025-01-13-16-27-57_699_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-16-27-57_699_hand.jpg", + "images/2025-01-13-16-27-57_866_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1011_hand.jpg", + "images/2025-01-13-16-27-57_866_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1011_hand.jpg", + "images/2025-01-13-16-27-57_1135_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1135_hand.jpg", + "images/2025-01-13-16-27-57_1252_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1392_hand.jpg", + "images/2025-01-13-16-27-57_1252_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1392_hand.jpg", + "images/2025-01-13-16-27-57_1520_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1520_hand.jpg", + "images/2025-01-13-16-27-57_1627_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1627_hand.jpg", + "images/2025-01-13-16-27-57_2063_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2225_hand.jpg", + "images/2025-01-13-16-27-57_2063_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2225_hand.jpg", + "images/2025-01-13-16-27-57_2406_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2586_hand.jpg", + "images/2025-01-13-16-27-57_2406_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2725_hand.jpg", + "images/2025-01-13-16-27-57_2586_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2876_hand.jpg", + "images/2025-01-13-16-27-57_2725_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2876_hand.jpg", + "images/2025-01-13-16-27-57_3041_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3041_hand.jpg", + "images/2025-01-13-16-27-57_3221_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3221_hand.jpg", + "images/2025-01-13-16-27-57_3388_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3388_hand.jpg", + "images/2025-01-13-16-27-57_3538_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3538_hand.jpg", + "images/2025-01-13-16-27-57_3743_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3968_hand.jpg", + "images/2025-01-13-16-27-57_3743_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4506_hand.jpg", + "images/2025-01-13-16-27-57_3968_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4652_hand.jpg", + "images/2025-01-13-16-27-57_4506_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4652_hand.jpg", + "images/2025-01-13-16-27-57_4817_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4965_hand.jpg", + "images/2025-01-13-16-27-57_4817_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5093_hand.jpg", + "images/2025-01-13-16-27-57_4965_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5093_hand.jpg", + "images/2025-01-13-16-27-57_5228_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5382_hand.jpg", + "images/2025-01-13-16-27-57_5228_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5556_hand.jpg", + "images/2025-01-13-16-27-57_5382_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5556_hand.jpg", + "images/2025-01-13-16-27-57_5790_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5790_hand.jpg", + "images/2025-01-13-16-27-57_6026_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-16-27-57_146_hand.jpg", + "images/2025-01-13-16-27-57_412_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-16-27-57_547_hand.jpg", + "images/2025-01-13-16-27-57_279_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-16-27-57_699_hand.jpg", + "images/2025-01-13-16-27-57_412_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-16-27-57_547_hand.jpg", + "images/2025-01-13-16-27-57_866_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1011_hand.jpg", + "images/2025-01-13-16-27-57_699_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1135_hand.jpg", + "images/2025-01-13-16-27-57_866_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1252_hand.jpg", + "images/2025-01-13-16-27-57_1011_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1392_hand.jpg", + "images/2025-01-13-16-27-57_1135_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1252_hand.jpg", + "images/2025-01-13-16-27-57_1520_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1627_hand.jpg", + "images/2025-01-13-16-27-57_1392_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1520_hand.jpg", + "images/2025-01-13-16-27-57_2063_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2225_hand.jpg", + "images/2025-01-13-16-27-57_1627_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2063_hand.jpg", + "images/2025-01-13-16-27-57_2406_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2225_hand.jpg", + "images/2025-01-13-16-27-57_2586_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2725_hand.jpg", + "images/2025-01-13-16-27-57_2406_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2586_hand.jpg", + "images/2025-01-13-16-27-57_2876_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2725_hand.jpg", + "images/2025-01-13-16-27-57_3041_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2876_hand.jpg", + "images/2025-01-13-16-27-57_3221_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3041_hand.jpg", + "images/2025-01-13-16-27-57_3388_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3538_hand.jpg", + "images/2025-01-13-16-27-57_3221_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3388_hand.jpg", + "images/2025-01-13-16-27-57_3743_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3968_hand.jpg", + "images/2025-01-13-16-27-57_3538_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3743_hand.jpg", + "images/2025-01-13-16-27-57_4506_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3968_hand.jpg", + "images/2025-01-13-16-27-57_4652_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4506_hand.jpg", + "images/2025-01-13-16-27-57_4817_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4965_hand.jpg", + "images/2025-01-13-16-27-57_4652_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5093_hand.jpg", + "images/2025-01-13-16-27-57_4817_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5228_hand.jpg", + "images/2025-01-13-16-27-57_4965_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5093_hand.jpg", + "images/2025-01-13-16-27-57_5382_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5228_hand.jpg", + "images/2025-01-13-16-27-57_5556_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5382_hand.jpg", + "images/2025-01-13-16-27-57_5790_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-16-27-57_6026_hand.jpg", + "images/2025-01-13-16-27-57_5556_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_107_hand.jpg", + "images/2025-01-13-16-27-57_110_hand.jpg", + "images/2025-01-13-16-27-57_113_hand.jpg", + "images/2025-01-13-16-27-57_116_hand.jpg", + "images/2025-01-13-16-27-57_119_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_140_hand.jpg", + "images/2025-01-13-16-27-57_143_hand.jpg", + "images/2025-01-13-16-27-57_146_hand.jpg", + "images/2025-01-13-16-27-57_149_hand.jpg", + "images/2025-01-13-16-27-57_152_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_173_hand.jpg", + "images/2025-01-13-16-27-57_176_hand.jpg", + "images/2025-01-13-16-27-57_179_hand.jpg", + "images/2025-01-13-16-27-57_182_hand.jpg", + "images/2025-01-13-16-27-57_185_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_260_hand.jpg", + "images/2025-01-13-16-27-57_263_hand.jpg", + "images/2025-01-13-16-27-57_266_hand.jpg", + "images/2025-01-13-16-27-57_269_hand.jpg", + "images/2025-01-13-16-27-57_272_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_274_hand.jpg", + "images/2025-01-13-16-27-57_277_hand.jpg", + "images/2025-01-13-16-27-57_280_hand.jpg", + "images/2025-01-13-16-27-57_283_hand.jpg", + "images/2025-01-13-16-27-57_286_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_287_hand.jpg", + "images/2025-01-13-16-27-57_290_hand.jpg", + "images/2025-01-13-16-27-57_293_hand.jpg", + "images/2025-01-13-16-27-57_296_hand.jpg", + "images/2025-01-13-16-27-57_299_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_373_hand.jpg", + "images/2025-01-13-16-27-57_376_hand.jpg", + "images/2025-01-13-16-27-57_379_hand.jpg", + "images/2025-01-13-16-27-57_382_hand.jpg", + "images/2025-01-13-16-27-57_385_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_406_hand.jpg", + "images/2025-01-13-16-27-57_409_hand.jpg", + "images/2025-01-13-16-27-57_412_hand.jpg", + "images/2025-01-13-16-27-57_415_hand.jpg", + "images/2025-01-13-16-27-57_418_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_439_hand.jpg", + "images/2025-01-13-16-27-57_442_hand.jpg", + "images/2025-01-13-16-27-57_445_hand.jpg", + "images/2025-01-13-16-27-57_448_hand.jpg", + "images/2025-01-13-16-27-57_451_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_527_hand.jpg", + "images/2025-01-13-16-27-57_530_hand.jpg", + "images/2025-01-13-16-27-57_533_hand.jpg", + "images/2025-01-13-16-27-57_536_hand.jpg", + "images/2025-01-13-16-27-57_539_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_542_hand.jpg", + "images/2025-01-13-16-27-57_545_hand.jpg", + "images/2025-01-13-16-27-57_548_hand.jpg", + "images/2025-01-13-16-27-57_551_hand.jpg", + "images/2025-01-13-16-27-57_554_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_556_hand.jpg", + "images/2025-01-13-16-27-57_559_hand.jpg", + "images/2025-01-13-16-27-57_562_hand.jpg", + "images/2025-01-13-16-27-57_565_hand.jpg", + "images/2025-01-13-16-27-57_568_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_654_hand.jpg", + "images/2025-01-13-16-27-57_657_hand.jpg", + "images/2025-01-13-16-27-57_660_hand.jpg", + "images/2025-01-13-16-27-57_663_hand.jpg", + "images/2025-01-13-16-27-57_666_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_693_hand.jpg", + "images/2025-01-13-16-27-57_696_hand.jpg", + "images/2025-01-13-16-27-57_699_hand.jpg", + "images/2025-01-13-16-27-57_702_hand.jpg", + "images/2025-01-13-16-27-57_705_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_732_hand.jpg", + "images/2025-01-13-16-27-57_735_hand.jpg", + "images/2025-01-13-16-27-57_738_hand.jpg", + "images/2025-01-13-16-27-57_741_hand.jpg", + "images/2025-01-13-16-27-57_744_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_841_hand.jpg", + "images/2025-01-13-16-27-57_844_hand.jpg", + "images/2025-01-13-16-27-57_847_hand.jpg", + "images/2025-01-13-16-27-57_850_hand.jpg", + "images/2025-01-13-16-27-57_853_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_860_hand.jpg", + "images/2025-01-13-16-27-57_863_hand.jpg", + "images/2025-01-13-16-27-57_866_hand.jpg", + "images/2025-01-13-16-27-57_869_hand.jpg", + "images/2025-01-13-16-27-57_872_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_879_hand.jpg", + "images/2025-01-13-16-27-57_882_hand.jpg", + "images/2025-01-13-16-27-57_885_hand.jpg", + "images/2025-01-13-16-27-57_888_hand.jpg", + "images/2025-01-13-16-27-57_891_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_974_hand.jpg", + "images/2025-01-13-16-27-57_977_hand.jpg", + "images/2025-01-13-16-27-57_980_hand.jpg", + "images/2025-01-13-16-27-57_983_hand.jpg", + "images/2025-01-13-16-27-57_986_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1005_hand.jpg", + "images/2025-01-13-16-27-57_1008_hand.jpg", + "images/2025-01-13-16-27-57_1011_hand.jpg", + "images/2025-01-13-16-27-57_1014_hand.jpg", + "images/2025-01-13-16-27-57_1017_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1036_hand.jpg", + "images/2025-01-13-16-27-57_1039_hand.jpg", + "images/2025-01-13-16-27-57_1042_hand.jpg", + "images/2025-01-13-16-27-57_1045_hand.jpg", + "images/2025-01-13-16-27-57_1048_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1117_hand.jpg", + "images/2025-01-13-16-27-57_1120_hand.jpg", + "images/2025-01-13-16-27-57_1123_hand.jpg", + "images/2025-01-13-16-27-57_1126_hand.jpg", + "images/2025-01-13-16-27-57_1129_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1129_hand.jpg", + "images/2025-01-13-16-27-57_1132_hand.jpg", + "images/2025-01-13-16-27-57_1135_hand.jpg", + "images/2025-01-13-16-27-57_1138_hand.jpg", + "images/2025-01-13-16-27-57_1141_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1141_hand.jpg", + "images/2025-01-13-16-27-57_1144_hand.jpg", + "images/2025-01-13-16-27-57_1147_hand.jpg", + "images/2025-01-13-16-27-57_1150_hand.jpg", + "images/2025-01-13-16-27-57_1153_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1218_hand.jpg", + "images/2025-01-13-16-27-57_1221_hand.jpg", + "images/2025-01-13-16-27-57_1224_hand.jpg", + "images/2025-01-13-16-27-57_1227_hand.jpg", + "images/2025-01-13-16-27-57_1230_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1246_hand.jpg", + "images/2025-01-13-16-27-57_1249_hand.jpg", + "images/2025-01-13-16-27-57_1252_hand.jpg", + "images/2025-01-13-16-27-57_1255_hand.jpg", + "images/2025-01-13-16-27-57_1258_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1274_hand.jpg", + "images/2025-01-13-16-27-57_1277_hand.jpg", + "images/2025-01-13-16-27-57_1280_hand.jpg", + "images/2025-01-13-16-27-57_1283_hand.jpg", + "images/2025-01-13-16-27-57_1286_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1366_hand.jpg", + "images/2025-01-13-16-27-57_1369_hand.jpg", + "images/2025-01-13-16-27-57_1372_hand.jpg", + "images/2025-01-13-16-27-57_1375_hand.jpg", + "images/2025-01-13-16-27-57_1378_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1386_hand.jpg", + "images/2025-01-13-16-27-57_1389_hand.jpg", + "images/2025-01-13-16-27-57_1392_hand.jpg", + "images/2025-01-13-16-27-57_1395_hand.jpg", + "images/2025-01-13-16-27-57_1398_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1407_hand.jpg", + "images/2025-01-13-16-27-57_1410_hand.jpg", + "images/2025-01-13-16-27-57_1413_hand.jpg", + "images/2025-01-13-16-27-57_1416_hand.jpg", + "images/2025-01-13-16-27-57_1419_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1490_hand.jpg", + "images/2025-01-13-16-27-57_1493_hand.jpg", + "images/2025-01-13-16-27-57_1496_hand.jpg", + "images/2025-01-13-16-27-57_1499_hand.jpg", + "images/2025-01-13-16-27-57_1502_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1514_hand.jpg", + "images/2025-01-13-16-27-57_1517_hand.jpg", + "images/2025-01-13-16-27-57_1520_hand.jpg", + "images/2025-01-13-16-27-57_1523_hand.jpg", + "images/2025-01-13-16-27-57_1526_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1538_hand.jpg", + "images/2025-01-13-16-27-57_1541_hand.jpg", + "images/2025-01-13-16-27-57_1544_hand.jpg", + "images/2025-01-13-16-27-57_1547_hand.jpg", + "images/2025-01-13-16-27-57_1550_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1608_hand.jpg", + "images/2025-01-13-16-27-57_1611_hand.jpg", + "images/2025-01-13-16-27-57_1614_hand.jpg", + "images/2025-01-13-16-27-57_1617_hand.jpg", + "images/2025-01-13-16-27-57_1620_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1622_hand.jpg", + "images/2025-01-13-16-27-57_1625_hand.jpg", + "images/2025-01-13-16-27-57_1628_hand.jpg", + "images/2025-01-13-16-27-57_1631_hand.jpg", + "images/2025-01-13-16-27-57_1634_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1635_hand.jpg", + "images/2025-01-13-16-27-57_1638_hand.jpg", + "images/2025-01-13-16-27-57_1641_hand.jpg", + "images/2025-01-13-16-27-57_1644_hand.jpg", + "images/2025-01-13-16-27-57_1647_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2016_hand.jpg", + "images/2025-01-13-16-27-57_2019_hand.jpg", + "images/2025-01-13-16-27-57_2022_hand.jpg", + "images/2025-01-13-16-27-57_2025_hand.jpg", + "images/2025-01-13-16-27-57_2028_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2057_hand.jpg", + "images/2025-01-13-16-27-57_2060_hand.jpg", + "images/2025-01-13-16-27-57_2063_hand.jpg", + "images/2025-01-13-16-27-57_2066_hand.jpg", + "images/2025-01-13-16-27-57_2069_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2098_hand.jpg", + "images/2025-01-13-16-27-57_2101_hand.jpg", + "images/2025-01-13-16-27-57_2104_hand.jpg", + "images/2025-01-13-16-27-57_2107_hand.jpg", + "images/2025-01-13-16-27-57_2110_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2204_hand.jpg", + "images/2025-01-13-16-27-57_2207_hand.jpg", + "images/2025-01-13-16-27-57_2210_hand.jpg", + "images/2025-01-13-16-27-57_2213_hand.jpg", + "images/2025-01-13-16-27-57_2216_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2220_hand.jpg", + "images/2025-01-13-16-27-57_2223_hand.jpg", + "images/2025-01-13-16-27-57_2226_hand.jpg", + "images/2025-01-13-16-27-57_2229_hand.jpg", + "images/2025-01-13-16-27-57_2232_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2235_hand.jpg", + "images/2025-01-13-16-27-57_2238_hand.jpg", + "images/2025-01-13-16-27-57_2241_hand.jpg", + "images/2025-01-13-16-27-57_2244_hand.jpg", + "images/2025-01-13-16-27-57_2247_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2353_hand.jpg", + "images/2025-01-13-16-27-57_2356_hand.jpg", + "images/2025-01-13-16-27-57_2359_hand.jpg", + "images/2025-01-13-16-27-57_2362_hand.jpg", + "images/2025-01-13-16-27-57_2365_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2400_hand.jpg", + "images/2025-01-13-16-27-57_2403_hand.jpg", + "images/2025-01-13-16-27-57_2406_hand.jpg", + "images/2025-01-13-16-27-57_2409_hand.jpg", + "images/2025-01-13-16-27-57_2412_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2448_hand.jpg", + "images/2025-01-13-16-27-57_2451_hand.jpg", + "images/2025-01-13-16-27-57_2454_hand.jpg", + "images/2025-01-13-16-27-57_2457_hand.jpg", + "images/2025-01-13-16-27-57_2460_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2565_hand.jpg", + "images/2025-01-13-16-27-57_2568_hand.jpg", + "images/2025-01-13-16-27-57_2571_hand.jpg", + "images/2025-01-13-16-27-57_2574_hand.jpg", + "images/2025-01-13-16-27-57_2577_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2580_hand.jpg", + "images/2025-01-13-16-27-57_2583_hand.jpg", + "images/2025-01-13-16-27-57_2586_hand.jpg", + "images/2025-01-13-16-27-57_2589_hand.jpg", + "images/2025-01-13-16-27-57_2592_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2596_hand.jpg", + "images/2025-01-13-16-27-57_2599_hand.jpg", + "images/2025-01-13-16-27-57_2602_hand.jpg", + "images/2025-01-13-16-27-57_2605_hand.jpg", + "images/2025-01-13-16-27-57_2608_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2686_hand.jpg", + "images/2025-01-13-16-27-57_2689_hand.jpg", + "images/2025-01-13-16-27-57_2692_hand.jpg", + "images/2025-01-13-16-27-57_2695_hand.jpg", + "images/2025-01-13-16-27-57_2698_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2720_hand.jpg", + "images/2025-01-13-16-27-57_2723_hand.jpg", + "images/2025-01-13-16-27-57_2726_hand.jpg", + "images/2025-01-13-16-27-57_2729_hand.jpg", + "images/2025-01-13-16-27-57_2732_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2753_hand.jpg", + "images/2025-01-13-16-27-57_2756_hand.jpg", + "images/2025-01-13-16-27-57_2759_hand.jpg", + "images/2025-01-13-16-27-57_2762_hand.jpg", + "images/2025-01-13-16-27-57_2765_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2851_hand.jpg", + "images/2025-01-13-16-27-57_2854_hand.jpg", + "images/2025-01-13-16-27-57_2857_hand.jpg", + "images/2025-01-13-16-27-57_2860_hand.jpg", + "images/2025-01-13-16-27-57_2863_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2870_hand.jpg", + "images/2025-01-13-16-27-57_2873_hand.jpg", + "images/2025-01-13-16-27-57_2876_hand.jpg", + "images/2025-01-13-16-27-57_2879_hand.jpg", + "images/2025-01-13-16-27-57_2882_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2890_hand.jpg", + "images/2025-01-13-16-27-57_2893_hand.jpg", + "images/2025-01-13-16-27-57_2896_hand.jpg", + "images/2025-01-13-16-27-57_2899_hand.jpg", + "images/2025-01-13-16-27-57_2902_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2998_hand.jpg", + "images/2025-01-13-16-27-57_3001_hand.jpg", + "images/2025-01-13-16-27-57_3004_hand.jpg", + "images/2025-01-13-16-27-57_3007_hand.jpg", + "images/2025-01-13-16-27-57_3010_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3036_hand.jpg", + "images/2025-01-13-16-27-57_3039_hand.jpg", + "images/2025-01-13-16-27-57_3042_hand.jpg", + "images/2025-01-13-16-27-57_3045_hand.jpg", + "images/2025-01-13-16-27-57_3048_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3073_hand.jpg", + "images/2025-01-13-16-27-57_3076_hand.jpg", + "images/2025-01-13-16-27-57_3079_hand.jpg", + "images/2025-01-13-16-27-57_3082_hand.jpg", + "images/2025-01-13-16-27-57_3085_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3191_hand.jpg", + "images/2025-01-13-16-27-57_3194_hand.jpg", + "images/2025-01-13-16-27-57_3197_hand.jpg", + "images/2025-01-13-16-27-57_3200_hand.jpg", + "images/2025-01-13-16-27-57_3203_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3216_hand.jpg", + "images/2025-01-13-16-27-57_3219_hand.jpg", + "images/2025-01-13-16-27-57_3222_hand.jpg", + "images/2025-01-13-16-27-57_3225_hand.jpg", + "images/2025-01-13-16-27-57_3228_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3240_hand.jpg", + "images/2025-01-13-16-27-57_3243_hand.jpg", + "images/2025-01-13-16-27-57_3246_hand.jpg", + "images/2025-01-13-16-27-57_3249_hand.jpg", + "images/2025-01-13-16-27-57_3252_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3349_hand.jpg", + "images/2025-01-13-16-27-57_3352_hand.jpg", + "images/2025-01-13-16-27-57_3355_hand.jpg", + "images/2025-01-13-16-27-57_3358_hand.jpg", + "images/2025-01-13-16-27-57_3361_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3382_hand.jpg", + "images/2025-01-13-16-27-57_3385_hand.jpg", + "images/2025-01-13-16-27-57_3388_hand.jpg", + "images/2025-01-13-16-27-57_3391_hand.jpg", + "images/2025-01-13-16-27-57_3394_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3416_hand.jpg", + "images/2025-01-13-16-27-57_3419_hand.jpg", + "images/2025-01-13-16-27-57_3422_hand.jpg", + "images/2025-01-13-16-27-57_3425_hand.jpg", + "images/2025-01-13-16-27-57_3428_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3514_hand.jpg", + "images/2025-01-13-16-27-57_3517_hand.jpg", + "images/2025-01-13-16-27-57_3520_hand.jpg", + "images/2025-01-13-16-27-57_3523_hand.jpg", + "images/2025-01-13-16-27-57_3526_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3532_hand.jpg", + "images/2025-01-13-16-27-57_3535_hand.jpg", + "images/2025-01-13-16-27-57_3538_hand.jpg", + "images/2025-01-13-16-27-57_3541_hand.jpg", + "images/2025-01-13-16-27-57_3544_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3550_hand.jpg", + "images/2025-01-13-16-27-57_3553_hand.jpg", + "images/2025-01-13-16-27-57_3556_hand.jpg", + "images/2025-01-13-16-27-57_3559_hand.jpg", + "images/2025-01-13-16-27-57_3562_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3685_hand.jpg", + "images/2025-01-13-16-27-57_3688_hand.jpg", + "images/2025-01-13-16-27-57_3691_hand.jpg", + "images/2025-01-13-16-27-57_3694_hand.jpg", + "images/2025-01-13-16-27-57_3697_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3738_hand.jpg", + "images/2025-01-13-16-27-57_3741_hand.jpg", + "images/2025-01-13-16-27-57_3744_hand.jpg", + "images/2025-01-13-16-27-57_3747_hand.jpg", + "images/2025-01-13-16-27-57_3750_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3790_hand.jpg", + "images/2025-01-13-16-27-57_3793_hand.jpg", + "images/2025-01-13-16-27-57_3796_hand.jpg", + "images/2025-01-13-16-27-57_3799_hand.jpg", + "images/2025-01-13-16-27-57_3802_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3937_hand.jpg", + "images/2025-01-13-16-27-57_3940_hand.jpg", + "images/2025-01-13-16-27-57_3943_hand.jpg", + "images/2025-01-13-16-27-57_3946_hand.jpg", + "images/2025-01-13-16-27-57_3949_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3962_hand.jpg", + "images/2025-01-13-16-27-57_3965_hand.jpg", + "images/2025-01-13-16-27-57_3968_hand.jpg", + "images/2025-01-13-16-27-57_3971_hand.jpg", + "images/2025-01-13-16-27-57_3974_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3988_hand.jpg", + "images/2025-01-13-16-27-57_3991_hand.jpg", + "images/2025-01-13-16-27-57_3994_hand.jpg", + "images/2025-01-13-16-27-57_3997_hand.jpg", + "images/2025-01-13-16-27-57_4000_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4470_hand.jpg", + "images/2025-01-13-16-27-57_4473_hand.jpg", + "images/2025-01-13-16-27-57_4476_hand.jpg", + "images/2025-01-13-16-27-57_4479_hand.jpg", + "images/2025-01-13-16-27-57_4482_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4500_hand.jpg", + "images/2025-01-13-16-27-57_4503_hand.jpg", + "images/2025-01-13-16-27-57_4506_hand.jpg", + "images/2025-01-13-16-27-57_4509_hand.jpg", + "images/2025-01-13-16-27-57_4512_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4530_hand.jpg", + "images/2025-01-13-16-27-57_4533_hand.jpg", + "images/2025-01-13-16-27-57_4536_hand.jpg", + "images/2025-01-13-16-27-57_4539_hand.jpg", + "images/2025-01-13-16-27-57_4542_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4626_hand.jpg", + "images/2025-01-13-16-27-57_4629_hand.jpg", + "images/2025-01-13-16-27-57_4632_hand.jpg", + "images/2025-01-13-16-27-57_4635_hand.jpg", + "images/2025-01-13-16-27-57_4638_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4646_hand.jpg", + "images/2025-01-13-16-27-57_4649_hand.jpg", + "images/2025-01-13-16-27-57_4652_hand.jpg", + "images/2025-01-13-16-27-57_4655_hand.jpg", + "images/2025-01-13-16-27-57_4658_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4666_hand.jpg", + "images/2025-01-13-16-27-57_4669_hand.jpg", + "images/2025-01-13-16-27-57_4672_hand.jpg", + "images/2025-01-13-16-27-57_4675_hand.jpg", + "images/2025-01-13-16-27-57_4678_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4774_hand.jpg", + "images/2025-01-13-16-27-57_4777_hand.jpg", + "images/2025-01-13-16-27-57_4780_hand.jpg", + "images/2025-01-13-16-27-57_4783_hand.jpg", + "images/2025-01-13-16-27-57_4786_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4811_hand.jpg", + "images/2025-01-13-16-27-57_4814_hand.jpg", + "images/2025-01-13-16-27-57_4817_hand.jpg", + "images/2025-01-13-16-27-57_4820_hand.jpg", + "images/2025-01-13-16-27-57_4823_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4848_hand.jpg", + "images/2025-01-13-16-27-57_4851_hand.jpg", + "images/2025-01-13-16-27-57_4854_hand.jpg", + "images/2025-01-13-16-27-57_4857_hand.jpg", + "images/2025-01-13-16-27-57_4860_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4945_hand.jpg", + "images/2025-01-13-16-27-57_4948_hand.jpg", + "images/2025-01-13-16-27-57_4951_hand.jpg", + "images/2025-01-13-16-27-57_4954_hand.jpg", + "images/2025-01-13-16-27-57_4957_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4959_hand.jpg", + "images/2025-01-13-16-27-57_4962_hand.jpg", + "images/2025-01-13-16-27-57_4965_hand.jpg", + "images/2025-01-13-16-27-57_4968_hand.jpg", + "images/2025-01-13-16-27-57_4971_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4973_hand.jpg", + "images/2025-01-13-16-27-57_4976_hand.jpg", + "images/2025-01-13-16-27-57_4979_hand.jpg", + "images/2025-01-13-16-27-57_4982_hand.jpg", + "images/2025-01-13-16-27-57_4985_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5056_hand.jpg", + "images/2025-01-13-16-27-57_5059_hand.jpg", + "images/2025-01-13-16-27-57_5062_hand.jpg", + "images/2025-01-13-16-27-57_5065_hand.jpg", + "images/2025-01-13-16-27-57_5068_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5087_hand.jpg", + "images/2025-01-13-16-27-57_5090_hand.jpg", + "images/2025-01-13-16-27-57_5093_hand.jpg", + "images/2025-01-13-16-27-57_5096_hand.jpg", + "images/2025-01-13-16-27-57_5099_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5118_hand.jpg", + "images/2025-01-13-16-27-57_5121_hand.jpg", + "images/2025-01-13-16-27-57_5124_hand.jpg", + "images/2025-01-13-16-27-57_5127_hand.jpg", + "images/2025-01-13-16-27-57_5130_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5206_hand.jpg", + "images/2025-01-13-16-27-57_5209_hand.jpg", + "images/2025-01-13-16-27-57_5212_hand.jpg", + "images/2025-01-13-16-27-57_5215_hand.jpg", + "images/2025-01-13-16-27-57_5218_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5222_hand.jpg", + "images/2025-01-13-16-27-57_5225_hand.jpg", + "images/2025-01-13-16-27-57_5228_hand.jpg", + "images/2025-01-13-16-27-57_5231_hand.jpg", + "images/2025-01-13-16-27-57_5234_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5238_hand.jpg", + "images/2025-01-13-16-27-57_5241_hand.jpg", + "images/2025-01-13-16-27-57_5244_hand.jpg", + "images/2025-01-13-16-27-57_5247_hand.jpg", + "images/2025-01-13-16-27-57_5250_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5340_hand.jpg", + "images/2025-01-13-16-27-57_5343_hand.jpg", + "images/2025-01-13-16-27-57_5346_hand.jpg", + "images/2025-01-13-16-27-57_5349_hand.jpg", + "images/2025-01-13-16-27-57_5352_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5376_hand.jpg", + "images/2025-01-13-16-27-57_5379_hand.jpg", + "images/2025-01-13-16-27-57_5382_hand.jpg", + "images/2025-01-13-16-27-57_5385_hand.jpg", + "images/2025-01-13-16-27-57_5388_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5412_hand.jpg", + "images/2025-01-13-16-27-57_5415_hand.jpg", + "images/2025-01-13-16-27-57_5418_hand.jpg", + "images/2025-01-13-16-27-57_5421_hand.jpg", + "images/2025-01-13-16-27-57_5424_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5527_hand.jpg", + "images/2025-01-13-16-27-57_5530_hand.jpg", + "images/2025-01-13-16-27-57_5533_hand.jpg", + "images/2025-01-13-16-27-57_5536_hand.jpg", + "images/2025-01-13-16-27-57_5539_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5550_hand.jpg", + "images/2025-01-13-16-27-57_5553_hand.jpg", + "images/2025-01-13-16-27-57_5556_hand.jpg", + "images/2025-01-13-16-27-57_5559_hand.jpg", + "images/2025-01-13-16-27-57_5562_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5574_hand.jpg", + "images/2025-01-13-16-27-57_5577_hand.jpg", + "images/2025-01-13-16-27-57_5580_hand.jpg", + "images/2025-01-13-16-27-57_5583_hand.jpg", + "images/2025-01-13-16-27-57_5586_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5726_hand.jpg", + "images/2025-01-13-16-27-57_5729_hand.jpg", + "images/2025-01-13-16-27-57_5732_hand.jpg", + "images/2025-01-13-16-27-57_5735_hand.jpg", + "images/2025-01-13-16-27-57_5738_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5784_hand.jpg", + "images/2025-01-13-16-27-57_5787_hand.jpg", + "images/2025-01-13-16-27-57_5790_hand.jpg", + "images/2025-01-13-16-27-57_5793_hand.jpg", + "images/2025-01-13-16-27-57_5796_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5842_hand.jpg", + "images/2025-01-13-16-27-57_5845_hand.jpg", + "images/2025-01-13-16-27-57_5848_hand.jpg", + "images/2025-01-13-16-27-57_5851_hand.jpg", + "images/2025-01-13-16-27-57_5854_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5996_hand.jpg", + "images/2025-01-13-16-27-57_5999_hand.jpg", + "images/2025-01-13-16-27-57_6002_hand.jpg", + "images/2025-01-13-16-27-57_6005_hand.jpg", + "images/2025-01-13-16-27-57_6008_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_6020_hand.jpg", + "images/2025-01-13-16-27-57_6023_hand.jpg", + "images/2025-01-13-16-27-57_6026_hand.jpg", + "images/2025-01-13-16-27-57_6029_hand.jpg", + "images/2025-01-13-16-27-57_6032_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-16-27-57_6045_hand.jpg", + "images/2025-01-13-16-27-57_6048_hand.jpg", + "images/2025-01-13-16-27-57_6051_hand.jpg", + "images/2025-01-13-16-27-57_6054_hand.jpg", + "images/2025-01-13-16-27-57_6057_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Align\nB. Lift\nC. Grasp\nD. Twist\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Grasp" + } + ], + "images": [ + "images/2025-01-13-16-27-57_127_hand.jpg", + "images/2025-01-13-16-27-57_130_hand.jpg", + "images/2025-01-13-16-27-57_133_hand.jpg", + "images/2025-01-13-16-27-57_136_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Align\nB. Lift\nC. Pull\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Pull" + } + ], + "images": [ + "images/2025-01-13-16-27-57_222_hand.jpg", + "images/2025-01-13-16-27-57_225_hand.jpg", + "images/2025-01-13-16-27-57_228_hand.jpg", + "images/2025-01-13-16-27-57_231_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Approach\nB. Twist\nC. Release\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Approach" + } + ], + "images": [ + "images/2025-01-13-16-27-57_265_hand.jpg", + "images/2025-01-13-16-27-57_268_hand.jpg", + "images/2025-01-13-16-27-57_271_hand.jpg", + "images/2025-01-13-16-27-57_274_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Release\nB. Push\nC. Lift\nD. Twist\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Release" + } + ], + "images": [ + "images/2025-01-13-16-27-57_303_hand.jpg", + "images/2025-01-13-16-27-57_306_hand.jpg", + "images/2025-01-13-16-27-57_309_hand.jpg", + "images/2025-01-13-16-27-57_312_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Lift\nB. Twist\nC. Align\nD. Grasp\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Grasp" + } + ], + "images": [ + "images/2025-01-13-16-27-57_382_hand.jpg", + "images/2025-01-13-16-27-57_385_hand.jpg", + "images/2025-01-13-16-27-57_388_hand.jpg", + "images/2025-01-13-16-27-57_391_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Pull\nB. Align\nC. Nudge\nD. Twist\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Pull" + } + ], + "images": [ + "images/2025-01-13-16-27-57_476_hand.jpg", + "images/2025-01-13-16-27-57_479_hand.jpg", + "images/2025-01-13-16-27-57_482_hand.jpg", + "images/2025-01-13-16-27-57_485_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Approach\nB. Nudge\nC. Grasp\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Approach" + } + ], + "images": [ + "images/2025-01-13-16-27-57_534_hand.jpg", + "images/2025-01-13-16-27-57_537_hand.jpg", + "images/2025-01-13-16-27-57_540_hand.jpg", + "images/2025-01-13-16-27-57_543_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Twist\nB. Push\nC. Release\nD. Grasp\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Release" + } + ], + "images": [ + "images/2025-01-13-16-27-57_575_hand.jpg", + "images/2025-01-13-16-27-57_578_hand.jpg", + "images/2025-01-13-16-27-57_581_hand.jpg", + "images/2025-01-13-16-27-57_584_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Pull\nB. Twist\nC. Grasp\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Grasp" + } + ], + "images": [ + "images/2025-01-13-16-27-57_670_hand.jpg", + "images/2025-01-13-16-27-57_673_hand.jpg", + "images/2025-01-13-16-27-57_676_hand.jpg", + "images/2025-01-13-16-27-57_679_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Pull\nB. Lift\nC. Release\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Pull" + } + ], + "images": [ + "images/2025-01-13-16-27-57_780_hand.jpg", + "images/2025-01-13-16-27-57_783_hand.jpg", + "images/2025-01-13-16-27-57_786_hand.jpg", + "images/2025-01-13-16-27-57_789_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Grasp\nB. Approach\nC. Lift\nD. Push\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Approach" + } + ], + "images": [ + "images/2025-01-13-16-27-57_854_hand.jpg", + "images/2025-01-13-16-27-57_857_hand.jpg", + "images/2025-01-13-16-27-57_860_hand.jpg", + "images/2025-01-13-16-27-57_863_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Align\nB. Release\nC. Approach\nD. Lift\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Release" + } + ], + "images": [ + "images/2025-01-13-16-27-57_909_hand.jpg", + "images/2025-01-13-16-27-57_912_hand.jpg", + "images/2025-01-13-16-27-57_915_hand.jpg", + "images/2025-01-13-16-27-57_918_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Grasp\nB. Push\nC. Release\nD. Pull\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Grasp" + } + ], + "images": [ + "images/2025-01-13-16-27-57_989_hand.jpg", + "images/2025-01-13-16-27-57_992_hand.jpg", + "images/2025-01-13-16-27-57_995_hand.jpg", + "images/2025-01-13-16-27-57_998_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Pull\nB. Align\nC. Push\nD. Nudge\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Pull" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1078_hand.jpg", + "images/2025-01-13-16-27-57_1081_hand.jpg", + "images/2025-01-13-16-27-57_1084_hand.jpg", + "images/2025-01-13-16-27-57_1087_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Twist\nB. Push\nC. Approach\nD. Lift\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Approach" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1122_hand.jpg", + "images/2025-01-13-16-27-57_1125_hand.jpg", + "images/2025-01-13-16-27-57_1128_hand.jpg", + "images/2025-01-13-16-27-57_1131_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Release\nB. Push\nC. Nudge\nD. Pull\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Release" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1157_hand.jpg", + "images/2025-01-13-16-27-57_1160_hand.jpg", + "images/2025-01-13-16-27-57_1163_hand.jpg", + "images/2025-01-13-16-27-57_1166_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Pull\nB. Lift\nC. Push\nD. Grasp\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Grasp" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1216_hand.jpg", + "images/2025-01-13-16-27-57_1219_hand.jpg", + "images/2025-01-13-16-27-57_1222_hand.jpg", + "images/2025-01-13-16-27-57_1225_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Align\nB. Lift\nC. Grasp\nD. Pull\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Pull" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1298_hand.jpg", + "images/2025-01-13-16-27-57_1301_hand.jpg", + "images/2025-01-13-16-27-57_1304_hand.jpg", + "images/2025-01-13-16-27-57_1307_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Lift\nB. Nudge\nC. Approach\nD. Twist\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Approach" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1377_hand.jpg", + "images/2025-01-13-16-27-57_1380_hand.jpg", + "images/2025-01-13-16-27-57_1383_hand.jpg", + "images/2025-01-13-16-27-57_1386_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Release\nB. Pull\nC. Align\nD. Grasp\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Release" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1435_hand.jpg", + "images/2025-01-13-16-27-57_1438_hand.jpg", + "images/2025-01-13-16-27-57_1441_hand.jpg", + "images/2025-01-13-16-27-57_1444_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Grasp\nB. Approach\nC. Align\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Grasp" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1502_hand.jpg", + "images/2025-01-13-16-27-57_1505_hand.jpg", + "images/2025-01-13-16-27-57_1508_hand.jpg", + "images/2025-01-13-16-27-57_1511_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Pull\nB. Approach\nC. Grasp\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Pull" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1572_hand.jpg", + "images/2025-01-13-16-27-57_1575_hand.jpg", + "images/2025-01-13-16-27-57_1578_hand.jpg", + "images/2025-01-13-16-27-57_1581_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Twist\nB. Release\nC. Pull\nD. Approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Approach" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1614_hand.jpg", + "images/2025-01-13-16-27-57_1617_hand.jpg", + "images/2025-01-13-16-27-57_1620_hand.jpg", + "images/2025-01-13-16-27-57_1623_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Push\nB. Pull\nC. Release\nD. Grasp\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Release" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1652_hand.jpg", + "images/2025-01-13-16-27-57_1655_hand.jpg", + "images/2025-01-13-16-27-57_1658_hand.jpg", + "images/2025-01-13-16-27-57_1661_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Grasp\nB. Lift\nC. Release\nD. Pull\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Grasp" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1714_hand.jpg", + "images/2025-01-13-16-27-57_1717_hand.jpg", + "images/2025-01-13-16-27-57_1720_hand.jpg", + "images/2025-01-13-16-27-57_1723_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Align\nB. Lift\nC. Twist\nD. Pull\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Pull" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1854_hand.jpg", + "images/2025-01-13-16-27-57_1857_hand.jpg", + "images/2025-01-13-16-27-57_1860_hand.jpg", + "images/2025-01-13-16-27-57_1863_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Pull\nB. Nudge\nC. Lift\nD. Grasp\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Grasp" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2026_hand.jpg", + "images/2025-01-13-16-27-57_2029_hand.jpg", + "images/2025-01-13-16-27-57_2032_hand.jpg", + "images/2025-01-13-16-27-57_2035_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Grasp\nB. Lift\nC. Pull\nD. Nudge\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Pull" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2143_hand.jpg", + "images/2025-01-13-16-27-57_2146_hand.jpg", + "images/2025-01-13-16-27-57_2149_hand.jpg", + "images/2025-01-13-16-27-57_2152_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Grasp\nB. Approach\nC. Release\nD. Twist\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Approach" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2210_hand.jpg", + "images/2025-01-13-16-27-57_2213_hand.jpg", + "images/2025-01-13-16-27-57_2216_hand.jpg", + "images/2025-01-13-16-27-57_2219_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Lift\nB. Approach\nC. Release\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Release" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2255_hand.jpg", + "images/2025-01-13-16-27-57_2258_hand.jpg", + "images/2025-01-13-16-27-57_2261_hand.jpg", + "images/2025-01-13-16-27-57_2264_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Approach\nB. Release\nC. Grasp\nD. Lift\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Grasp" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2362_hand.jpg", + "images/2025-01-13-16-27-57_2365_hand.jpg", + "images/2025-01-13-16-27-57_2368_hand.jpg", + "images/2025-01-13-16-27-57_2371_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Twist\nB. Lift\nC. Align\nD. Approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Twist" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2497_hand.jpg", + "images/2025-01-13-16-27-57_2500_hand.jpg", + "images/2025-01-13-16-27-57_2503_hand.jpg", + "images/2025-01-13-16-27-57_2506_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Pull\nB. Twist\nC. Grasp\nD. Approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Approach" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2573_hand.jpg", + "images/2025-01-13-16-27-57_2576_hand.jpg", + "images/2025-01-13-16-27-57_2579_hand.jpg", + "images/2025-01-13-16-27-57_2582_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Approach\nB. Release\nC. Pull\nD. Push\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Release" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2617_hand.jpg", + "images/2025-01-13-16-27-57_2620_hand.jpg", + "images/2025-01-13-16-27-57_2623_hand.jpg", + "images/2025-01-13-16-27-57_2626_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Align\nB. Twist\nC. Grasp\nD. Pull\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Grasp" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2712_hand.jpg", + "images/2025-01-13-16-27-57_2715_hand.jpg", + "images/2025-01-13-16-27-57_2718_hand.jpg", + "images/2025-01-13-16-27-57_2721_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Release\nB. Approach\nC. Pull\nD. Grasp\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Pull" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2806_hand.jpg", + "images/2025-01-13-16-27-57_2809_hand.jpg", + "images/2025-01-13-16-27-57_2812_hand.jpg", + "images/2025-01-13-16-27-57_2815_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Lift\nB. Pull\nC. Approach\nD. Nudge\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Approach" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2859_hand.jpg", + "images/2025-01-13-16-27-57_2862_hand.jpg", + "images/2025-01-13-16-27-57_2865_hand.jpg", + "images/2025-01-13-16-27-57_2868_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Lift\nB. Pull\nC. Release\nD. Nudge\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Release" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2915_hand.jpg", + "images/2025-01-13-16-27-57_2918_hand.jpg", + "images/2025-01-13-16-27-57_2921_hand.jpg", + "images/2025-01-13-16-27-57_2924_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Twist\nB. Pull\nC. Lift\nD. Grasp\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Grasp" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3017_hand.jpg", + "images/2025-01-13-16-27-57_3020_hand.jpg", + "images/2025-01-13-16-27-57_3023_hand.jpg", + "images/2025-01-13-16-27-57_3026_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Twist\nB. Lift\nC. Pull\nD. Push\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Pull" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3126_hand.jpg", + "images/2025-01-13-16-27-57_3129_hand.jpg", + "images/2025-01-13-16-27-57_3132_hand.jpg", + "images/2025-01-13-16-27-57_3135_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Pull\nB. Approach\nC. Lift\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Approach" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3208_hand.jpg", + "images/2025-01-13-16-27-57_3211_hand.jpg", + "images/2025-01-13-16-27-57_3214_hand.jpg", + "images/2025-01-13-16-27-57_3217_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Approach\nB. Release\nC. Pull\nD. Nudge\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Release" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3278_hand.jpg", + "images/2025-01-13-16-27-57_3281_hand.jpg", + "images/2025-01-13-16-27-57_3284_hand.jpg", + "images/2025-01-13-16-27-57_3287_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Grasp\nB. Twist\nC. Approach\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Grasp" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3364_hand.jpg", + "images/2025-01-13-16-27-57_3367_hand.jpg", + "images/2025-01-13-16-27-57_3370_hand.jpg", + "images/2025-01-13-16-27-57_3373_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Release\nB. Pull\nC. Twist\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Pull" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3460_hand.jpg", + "images/2025-01-13-16-27-57_3463_hand.jpg", + "images/2025-01-13-16-27-57_3466_hand.jpg", + "images/2025-01-13-16-27-57_3469_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Approach\nB. Nudge\nC. Grasp\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Approach" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3526_hand.jpg", + "images/2025-01-13-16-27-57_3529_hand.jpg", + "images/2025-01-13-16-27-57_3532_hand.jpg", + "images/2025-01-13-16-27-57_3535_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Lift\nB. Twist\nC. Approach\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Release" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3580_hand.jpg", + "images/2025-01-13-16-27-57_3583_hand.jpg", + "images/2025-01-13-16-27-57_3586_hand.jpg", + "images/2025-01-13-16-27-57_3589_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Nudge\nB. Release\nC. Align\nD. Grasp\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Grasp" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3662_hand.jpg", + "images/2025-01-13-16-27-57_3665_hand.jpg", + "images/2025-01-13-16-27-57_3668_hand.jpg", + "images/2025-01-13-16-27-57_3671_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Nudge\nB. Pull\nC. Align\nD. Approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Pull" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3814_hand.jpg", + "images/2025-01-13-16-27-57_3817_hand.jpg", + "images/2025-01-13-16-27-57_3820_hand.jpg", + "images/2025-01-13-16-27-57_3823_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Approach\nB. Release\nC. Nudge\nD. Pull\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Approach" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3956_hand.jpg", + "images/2025-01-13-16-27-57_3959_hand.jpg", + "images/2025-01-13-16-27-57_3962_hand.jpg", + "images/2025-01-13-16-27-57_3965_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Align\nB. Nudge\nC. Release\nD. Approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Release" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4028_hand.jpg", + "images/2025-01-13-16-27-57_4031_hand.jpg", + "images/2025-01-13-16-27-57_4034_hand.jpg", + "images/2025-01-13-16-27-57_4037_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Approach\nB. Push\nC. Twist\nD. Grasp\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Grasp" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4140_hand.jpg", + "images/2025-01-13-16-27-57_4143_hand.jpg", + "images/2025-01-13-16-27-57_4146_hand.jpg", + "images/2025-01-13-16-27-57_4149_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Align\nB. Twist\nC. Lift\nD. Pull\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Twist" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4329_hand.jpg", + "images/2025-01-13-16-27-57_4332_hand.jpg", + "images/2025-01-13-16-27-57_4335_hand.jpg", + "images/2025-01-13-16-27-57_4338_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Pull\nB. Approach\nC. Push\nD. Grasp\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Grasp" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4474_hand.jpg", + "images/2025-01-13-16-27-57_4477_hand.jpg", + "images/2025-01-13-16-27-57_4480_hand.jpg", + "images/2025-01-13-16-27-57_4483_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Pull\nB. Twist\nC. Nudge\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Twist" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4548_hand.jpg", + "images/2025-01-13-16-27-57_4551_hand.jpg", + "images/2025-01-13-16-27-57_4554_hand.jpg", + "images/2025-01-13-16-27-57_4557_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Pull\nB. Align\nC. Push\nD. Nudge\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Pull" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4573_hand.jpg", + "images/2025-01-13-16-27-57_4576_hand.jpg", + "images/2025-01-13-16-27-57_4579_hand.jpg", + "images/2025-01-13-16-27-57_4582_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Align\nB. Approach\nC. Release\nD. Grasp\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Approach" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4638_hand.jpg", + "images/2025-01-13-16-27-57_4641_hand.jpg", + "images/2025-01-13-16-27-57_4644_hand.jpg", + "images/2025-01-13-16-27-57_4647_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Twist\nB. Nudge\nC. Grasp\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Release" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4696_hand.jpg", + "images/2025-01-13-16-27-57_4699_hand.jpg", + "images/2025-01-13-16-27-57_4702_hand.jpg", + "images/2025-01-13-16-27-57_4705_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Align\nB. Grasp\nC. Release\nD. Approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Grasp" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4794_hand.jpg", + "images/2025-01-13-16-27-57_4797_hand.jpg", + "images/2025-01-13-16-27-57_4800_hand.jpg", + "images/2025-01-13-16-27-57_4803_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Lift\nB. Pull\nC. Release\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Pull" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4902_hand.jpg", + "images/2025-01-13-16-27-57_4905_hand.jpg", + "images/2025-01-13-16-27-57_4908_hand.jpg", + "images/2025-01-13-16-27-57_4911_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Lift\nB. Approach\nC. Release\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Approach" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4952_hand.jpg", + "images/2025-01-13-16-27-57_4955_hand.jpg", + "images/2025-01-13-16-27-57_4958_hand.jpg", + "images/2025-01-13-16-27-57_4961_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Grasp\nB. Nudge\nC. Release\nD. Twist\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Release" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4992_hand.jpg", + "images/2025-01-13-16-27-57_4995_hand.jpg", + "images/2025-01-13-16-27-57_4998_hand.jpg", + "images/2025-01-13-16-27-57_5001_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Grasp\nB. Push\nC. Approach\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Grasp" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5071_hand.jpg", + "images/2025-01-13-16-27-57_5074_hand.jpg", + "images/2025-01-13-16-27-57_5077_hand.jpg", + "images/2025-01-13-16-27-57_5080_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Lift\nB. Pull\nC. Release\nD. Grasp\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Pull" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5158_hand.jpg", + "images/2025-01-13-16-27-57_5161_hand.jpg", + "images/2025-01-13-16-27-57_5164_hand.jpg", + "images/2025-01-13-16-27-57_5167_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Push\nB. Lift\nC. Pull\nD. Approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Approach" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5214_hand.jpg", + "images/2025-01-13-16-27-57_5217_hand.jpg", + "images/2025-01-13-16-27-57_5220_hand.jpg", + "images/2025-01-13-16-27-57_5223_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Release\nB. Lift\nC. Pull\nD. Nudge\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Release" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5262_hand.jpg", + "images/2025-01-13-16-27-57_5265_hand.jpg", + "images/2025-01-13-16-27-57_5268_hand.jpg", + "images/2025-01-13-16-27-57_5271_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Pull\nB. Nudge\nC. Approach\nD. Grasp\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Grasp" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5352_hand.jpg", + "images/2025-01-13-16-27-57_5355_hand.jpg", + "images/2025-01-13-16-27-57_5358_hand.jpg", + "images/2025-01-13-16-27-57_5361_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Nudge\nB. Approach\nC. Twist\nD. Pull\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Pull" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5457_hand.jpg", + "images/2025-01-13-16-27-57_5460_hand.jpg", + "images/2025-01-13-16-27-57_5463_hand.jpg", + "images/2025-01-13-16-27-57_5466_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Release\nB. Lift\nC. Align\nD. Approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Approach" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5542_hand.jpg", + "images/2025-01-13-16-27-57_5545_hand.jpg", + "images/2025-01-13-16-27-57_5548_hand.jpg", + "images/2025-01-13-16-27-57_5551_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Pull\nB. Release\nC. Grasp\nD. Push\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Release" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5611_hand.jpg", + "images/2025-01-13-16-27-57_5614_hand.jpg", + "images/2025-01-13-16-27-57_5617_hand.jpg", + "images/2025-01-13-16-27-57_5620_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Lift\nB. Push\nC. Grasp\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Grasp" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5770_hand.jpg", + "images/2025-01-13-16-27-57_5773_hand.jpg", + "images/2025-01-13-16-27-57_5776_hand.jpg", + "images/2025-01-13-16-27-57_5779_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Pull\nB. Push\nC. Nudge\nD. Lift\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Pull" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5935_hand.jpg", + "images/2025-01-13-16-27-57_5938_hand.jpg", + "images/2025-01-13-16-27-57_5941_hand.jpg", + "images/2025-01-13-16-27-57_5944_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Release\nB. Lift\nC. Pull\nD. Approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Approach" + } + ], + "images": [ + "images/2025-01-13-16-27-57_6003_hand.jpg", + "images/2025-01-13-16-27-57_6006_hand.jpg", + "images/2025-01-13-16-27-57_6009_hand.jpg", + "images/2025-01-13-16-27-57_6012_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Lift\nB. Approach\nC. Release\nD. Push\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Release" + } + ], + "images": [ + "images/2025-01-13-16-27-57_6074_hand.jpg", + "images/2025-01-13-16-27-57_6077_hand.jpg", + "images/2025-01-13-16-27-57_6080_hand.jpg", + "images/2025-01-13-16-27-57_6083_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Remove USB', what low-level action should happen next?\nOptions:\nA. Push\nB. Twist\nC. Nudge\nD. Pull\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Pull" + } + ], + "images": [ + "images/2025-01-13-16-27-57_156_hand.jpg", + "images/2025-01-13-16-27-57_159_hand.jpg", + "images/2025-01-13-16-27-57_162_hand.jpg", + "images/2025-01-13-16-27-57_165_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Place USB', what low-level action should happen next?\nOptions:\nA. Lift\nB. Align\nC. Nudge\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Release" + } + ], + "images": [ + "images/2025-01-13-16-27-57_275_hand.jpg", + "images/2025-01-13-16-27-57_278_hand.jpg", + "images/2025-01-13-16-27-57_281_hand.jpg", + "images/2025-01-13-16-27-57_284_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Remove square peg 4', what low-level action should happen next?\nOptions:\nA. Release\nB. Pull\nC. Lift\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Pull" + } + ], + "images": [ + "images/2025-01-13-16-27-57_406_hand.jpg", + "images/2025-01-13-16-27-57_409_hand.jpg", + "images/2025-01-13-16-27-57_412_hand.jpg", + "images/2025-01-13-16-27-57_415_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Place square peg 4', what low-level action should happen next?\nOptions:\nA. Align\nB. Pull\nC. Lift\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Release" + } + ], + "images": [ + "images/2025-01-13-16-27-57_546_hand.jpg", + "images/2025-01-13-16-27-57_549_hand.jpg", + "images/2025-01-13-16-27-57_552_hand.jpg", + "images/2025-01-13-16-27-57_555_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Remove square peg 3', what low-level action should happen next?\nOptions:\nA. Pull\nB. Align\nC. Release\nD. Twist\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Pull" + } + ], + "images": [ + "images/2025-01-13-16-27-57_700_hand.jpg", + "images/2025-01-13-16-27-57_703_hand.jpg", + "images/2025-01-13-16-27-57_706_hand.jpg", + "images/2025-01-13-16-27-57_709_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Place square peg 3', what low-level action should happen next?\nOptions:\nA. Pull\nB. Nudge\nC. Release\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Release" + } + ], + "images": [ + "images/2025-01-13-16-27-57_870_hand.jpg", + "images/2025-01-13-16-27-57_873_hand.jpg", + "images/2025-01-13-16-27-57_876_hand.jpg", + "images/2025-01-13-16-27-57_879_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Remove round peg 3', what low-level action should happen next?\nOptions:\nA. Approach\nB. Pull\nC. Lift\nD. Push\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Pull" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1014_hand.jpg", + "images/2025-01-13-16-27-57_1017_hand.jpg", + "images/2025-01-13-16-27-57_1020_hand.jpg", + "images/2025-01-13-16-27-57_1023_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Place round peg 3', what low-level action should happen next?\nOptions:\nA. Twist\nB. Align\nC. Push\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Release" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1132_hand.jpg", + "images/2025-01-13-16-27-57_1135_hand.jpg", + "images/2025-01-13-16-27-57_1138_hand.jpg", + "images/2025-01-13-16-27-57_1141_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Remove large gear', what low-level action should happen next?\nOptions:\nA. Pull\nB. Release\nC. Lift\nD. Approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Pull" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1234_hand.jpg", + "images/2025-01-13-16-27-57_1237_hand.jpg", + "images/2025-01-13-16-27-57_1240_hand.jpg", + "images/2025-01-13-16-27-57_1243_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Place large gear', what low-level action should happen next?\nOptions:\nA. Pull\nB. Twist\nC. Lift\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Release" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1394_hand.jpg", + "images/2025-01-13-16-27-57_1397_hand.jpg", + "images/2025-01-13-16-27-57_1400_hand.jpg", + "images/2025-01-13-16-27-57_1403_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Remove medium gear', what low-level action should happen next?\nOptions:\nA. Align\nB. Nudge\nC. Pull\nD. Push\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Pull" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1523_hand.jpg", + "images/2025-01-13-16-27-57_1526_hand.jpg", + "images/2025-01-13-16-27-57_1529_hand.jpg", + "images/2025-01-13-16-27-57_1532_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Place medium gear', what low-level action should happen next?\nOptions:\nA. Push\nB. Grasp\nC. Twist\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Release" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1625_hand.jpg", + "images/2025-01-13-16-27-57_1628_hand.jpg", + "images/2025-01-13-16-27-57_1631_hand.jpg", + "images/2025-01-13-16-27-57_1634_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Remove small gear', what low-level action should happen next?\nOptions:\nA. Push\nB. Align\nC. Nudge\nD. Pull\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Pull" + } + ], + "images": [ + "images/2025-01-13-16-27-57_1734_hand.jpg", + "images/2025-01-13-16-27-57_1737_hand.jpg", + "images/2025-01-13-16-27-57_1740_hand.jpg", + "images/2025-01-13-16-27-57_1743_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Remove small gear', what low-level action should happen next?\nOptions:\nA. Pull\nB. Twist\nC. Nudge\nD. Push\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Pull" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2055_hand.jpg", + "images/2025-01-13-16-27-57_2058_hand.jpg", + "images/2025-01-13-16-27-57_2061_hand.jpg", + "images/2025-01-13-16-27-57_2064_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Place small gear', what low-level action should happen next?\nOptions:\nA. Release\nB. Twist\nC. Lift\nD. Nudge\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Release" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2222_hand.jpg", + "images/2025-01-13-16-27-57_2225_hand.jpg", + "images/2025-01-13-16-27-57_2228_hand.jpg", + "images/2025-01-13-16-27-57_2231_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Remove bolt 4', what low-level action should happen next?\nOptions:\nA. Nudge\nB. Approach\nC. Twist\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Twist" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2396_hand.jpg", + "images/2025-01-13-16-27-57_2399_hand.jpg", + "images/2025-01-13-16-27-57_2402_hand.jpg", + "images/2025-01-13-16-27-57_2405_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Place bolt 4', what low-level action should happen next?\nOptions:\nA. Release\nB. Twist\nC. Grasp\nD. Lift\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Release" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2586_hand.jpg", + "images/2025-01-13-16-27-57_2589_hand.jpg", + "images/2025-01-13-16-27-57_2592_hand.jpg", + "images/2025-01-13-16-27-57_2595_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Remove D-SUB', what low-level action should happen next?\nOptions:\nA. Nudge\nB. Pull\nC. Align\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Pull" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2742_hand.jpg", + "images/2025-01-13-16-27-57_2745_hand.jpg", + "images/2025-01-13-16-27-57_2748_hand.jpg", + "images/2025-01-13-16-27-57_2751_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Place D-SUB', what low-level action should happen next?\nOptions:\nA. Grasp\nB. Release\nC. Lift\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Release" + } + ], + "images": [ + "images/2025-01-13-16-27-57_2874_hand.jpg", + "images/2025-01-13-16-27-57_2877_hand.jpg", + "images/2025-01-13-16-27-57_2880_hand.jpg", + "images/2025-01-13-16-27-57_2883_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Remove round peg 4', what low-level action should happen next?\nOptions:\nA. Lift\nB. Approach\nC. Pull\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Pull" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3048_hand.jpg", + "images/2025-01-13-16-27-57_3051_hand.jpg", + "images/2025-01-13-16-27-57_3054_hand.jpg", + "images/2025-01-13-16-27-57_3057_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Place round peg 4', what low-level action should happen next?\nOptions:\nA. Twist\nB. Release\nC. Pull\nD. Nudge\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Release" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3229_hand.jpg", + "images/2025-01-13-16-27-57_3232_hand.jpg", + "images/2025-01-13-16-27-57_3235_hand.jpg", + "images/2025-01-13-16-27-57_3238_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Remove square peg 1', what low-level action should happen next?\nOptions:\nA. Push\nB. Pull\nC. Align\nD. Twist\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Pull" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3392_hand.jpg", + "images/2025-01-13-16-27-57_3395_hand.jpg", + "images/2025-01-13-16-27-57_3398_hand.jpg", + "images/2025-01-13-16-27-57_3401_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Place square peg 1', what low-level action should happen next?\nOptions:\nA. Grasp\nB. Push\nC. Pull\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Release" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3542_hand.jpg", + "images/2025-01-13-16-27-57_3545_hand.jpg", + "images/2025-01-13-16-27-57_3548_hand.jpg", + "images/2025-01-13-16-27-57_3551_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Remove waterproof', what low-level action should happen next?\nOptions:\nA. Nudge\nB. Twist\nC. Lift\nD. Pull\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Pull" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3688_hand.jpg", + "images/2025-01-13-16-27-57_3691_hand.jpg", + "images/2025-01-13-16-27-57_3694_hand.jpg", + "images/2025-01-13-16-27-57_3697_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Place waterproof', what low-level action should happen next?\nOptions:\nA. Lift\nB. Pull\nC. Nudge\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Release" + } + ], + "images": [ + "images/2025-01-13-16-27-57_3978_hand.jpg", + "images/2025-01-13-16-27-57_3981_hand.jpg", + "images/2025-01-13-16-27-57_3984_hand.jpg", + "images/2025-01-13-16-27-57_3987_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Remove BNC', what low-level action should happen next?\nOptions:\nA. Twist\nB. Nudge\nC. Push\nD. Lift\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Twist" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4176_hand.jpg", + "images/2025-01-13-16-27-57_4179_hand.jpg", + "images/2025-01-13-16-27-57_4182_hand.jpg", + "images/2025-01-13-16-27-57_4185_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Remove BNC', what low-level action should happen next?\nOptions:\nA. Nudge\nB. Twist\nC. Lift\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Twist" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4496_hand.jpg", + "images/2025-01-13-16-27-57_4499_hand.jpg", + "images/2025-01-13-16-27-57_4502_hand.jpg", + "images/2025-01-13-16-27-57_4505_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Remove BNC', what low-level action should happen next?\nOptions:\nA. Pull\nB. Grasp\nC. Align\nD. Approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Pull" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4551_hand.jpg", + "images/2025-01-13-16-27-57_4554_hand.jpg", + "images/2025-01-13-16-27-57_4557_hand.jpg", + "images/2025-01-13-16-27-57_4560_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Place BNC', what low-level action should happen next?\nOptions:\nA. Nudge\nB. Push\nC. Pull\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Release" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4656_hand.jpg", + "images/2025-01-13-16-27-57_4659_hand.jpg", + "images/2025-01-13-16-27-57_4662_hand.jpg", + "images/2025-01-13-16-27-57_4665_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Remove round peg 2', what low-level action should happen next?\nOptions:\nA. Pull\nB. Nudge\nC. Align\nD. Twist\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Pull" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4826_hand.jpg", + "images/2025-01-13-16-27-57_4829_hand.jpg", + "images/2025-01-13-16-27-57_4832_hand.jpg", + "images/2025-01-13-16-27-57_4835_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Place round peg 2', what low-level action should happen next?\nOptions:\nA. Align\nB. Push\nC. Release\nD. Pull\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Release" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4963_hand.jpg", + "images/2025-01-13-16-27-57_4966_hand.jpg", + "images/2025-01-13-16-27-57_4969_hand.jpg", + "images/2025-01-13-16-27-57_4972_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Remove round peg 1', what low-level action should happen next?\nOptions:\nA. Pull\nB. Twist\nC. Lift\nD. Push\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Pull" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5096_hand.jpg", + "images/2025-01-13-16-27-57_5099_hand.jpg", + "images/2025-01-13-16-27-57_5102_hand.jpg", + "images/2025-01-13-16-27-57_5105_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Place round peg 1', what low-level action should happen next?\nOptions:\nA. Twist\nB. Align\nC. Pull\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Release" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5228_hand.jpg", + "images/2025-01-13-16-27-57_5231_hand.jpg", + "images/2025-01-13-16-27-57_5234_hand.jpg", + "images/2025-01-13-16-27-57_5237_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Remove square peg 2', what low-level action should happen next?\nOptions:\nA. Lift\nB. Pull\nC. Approach\nD. Push\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Pull" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5380_hand.jpg", + "images/2025-01-13-16-27-57_5383_hand.jpg", + "images/2025-01-13-16-27-57_5386_hand.jpg", + "images/2025-01-13-16-27-57_5389_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Place square peg 2', what low-level action should happen next?\nOptions:\nA. Twist\nB. Release\nC. Lift\nD. Push\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Release" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5563_hand.jpg", + "images/2025-01-13-16-27-57_5566_hand.jpg", + "images/2025-01-13-16-27-57_5569_hand.jpg", + "images/2025-01-13-16-27-57_5572_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Remove Ethernet', what low-level action should happen next?\nOptions:\nA. Align\nB. Pull\nC. Twist\nD. Nudge\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Pull" + } + ], + "images": [ + "images/2025-01-13-16-27-57_5823_hand.jpg", + "images/2025-01-13-16-27-57_5826_hand.jpg", + "images/2025-01-13-16-27-57_5829_hand.jpg", + "images/2025-01-13-16-27-57_5832_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Place Ethernet', what low-level action should happen next?\nOptions:\nA. Release\nB. Nudge\nC. Twist\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Release" + } + ], + "images": [ + "images/2025-01-13-16-27-57_6022_hand.jpg", + "images/2025-01-13-16-27-57_6025_hand.jpg", + "images/2025-01-13-16-27-57_6028_hand.jpg", + "images/2025-01-13-16-27-57_6031_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the high-level action 'Remove BNC', which action best fills [MASK] in this local chain: Grasp > [MASK] > Pull?\nOptions:\nA. Release\nB. Twist\nC. Nudge\nD. Grasp\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Twist" + } + ], + "images": [ + "images/2025-01-13-16-27-57_4548_hand.jpg", + "images/2025-01-13-16-27-57_4551_hand.jpg", + "images/2025-01-13-16-27-57_4554_hand.jpg", + "images/2025-01-13-16-27-57_4557_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T12", + "task_id": "2025-01-13-16-27-57", + "split": "eval", + "split_group_id": "2025-01-13-16-27-57", + "recording_id": "2025-01-13-16-27-57", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. approach\nC. transfer\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
pre-approach" + } + ], + "images": [ + "images/2025-01-13-17-57-45_36_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. approach\nC. contact\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_179_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. pre-approach\nC. release\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_333_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. hold and carry\nC. transfer\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
hold and carry" + } + ], + "images": [ + "images/2025-01-13-17-57-45_392_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. transfer\nC. pre-approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. transfer" + } + ], + "images": [ + "images/2025-01-13-17-57-45_466_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. approach\nC. transfer\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_607_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. transfer\nC. pre-approach\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
hold and carry" + } + ], + "images": [ + "images/2025-01-13-17-57-45_695_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. release\nC. contact\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. transfer" + } + ], + "images": [ + "images/2025-01-13-17-57-45_851_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. hold and carry\nC. contact\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_1315_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. transfer\nC. release\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
transfer" + } + ], + "images": [ + "images/2025-01-13-17-57-45_1684_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. pre-approach\nC. approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_1916_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. transfer\nC. release\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. release" + } + ], + "images": [ + "images/2025-01-13-17-57-45_2107_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. contact\nC. transfer\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_2255_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. pre-approach\nC. release\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. hold and carry" + } + ], + "images": [ + "images/2025-01-13-17-57-45_2394_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. transfer\nC. pre-approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. transfer" + } + ], + "images": [ + "images/2025-01-13-17-57-45_2484_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. transfer\nC. hold and carry\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_2605_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. hold and carry\nC. approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_2810_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. transfer\nC. release\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. release" + } + ], + "images": [ + "images/2025-01-13-17-57-45_2996_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. contact\nC. transfer\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_3073_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. hold and carry\nC. approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. hold and carry" + } + ], + "images": [ + "images/2025-01-13-17-57-45_3146_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. pre-approach\nC. release\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. transfer" + } + ], + "images": [ + "images/2025-01-13-17-57-45_3254_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. contact\nC. transfer\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
transfer" + } + ], + "images": [ + "images/2025-01-13-17-57-45_3434_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. release\nC. pre-approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. release" + } + ], + "images": [ + "images/2025-01-13-17-57-45_3533_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. hold and carry\nC. contact\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_3617_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. hold and carry\nC. approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
hold and carry" + } + ], + "images": [ + "images/2025-01-13-17-57-45_3695_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. approach\nC. pre-approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. transfer" + } + ], + "images": [ + "images/2025-01-13-17-57-45_3813_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. transfer\nC. pre-approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_3996_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. contact\nC. approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
release" + } + ], + "images": [ + "images/2025-01-13-17-57-45_4077_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. pre-approach\nC. release\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_4181_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. release\nC. pre-approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. hold and carry" + } + ], + "images": [ + "images/2025-01-13-17-57-45_4281_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. contact\nC. release\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
transfer" + } + ], + "images": [ + "images/2025-01-13-17-57-45_4361_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. hold and carry\nC. contact\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_4476_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. contact\nC. transfer\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_4564_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. transfer\nC. release\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_4684_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. hold and carry\nC. release\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. hold and carry" + } + ], + "images": [ + "images/2025-01-13-17-57-45_4766_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. hold and carry\nC. transfer\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. transfer" + } + ], + "images": [ + "images/2025-01-13-17-57-45_4851_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. hold and carry\nC. contact\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_5027_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. contact\nC. hold and carry\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. release" + } + ], + "images": [ + "images/2025-01-13-17-57-45_5134_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. hold and carry\nC. pre-approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_5210_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. contact\nC. hold and carry\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
hold and carry" + } + ], + "images": [ + "images/2025-01-13-17-57-45_5281_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. approach\nC. transfer\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. transfer" + } + ], + "images": [ + "images/2025-01-13-17-57-45_5375_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. transfer\nC. approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_5522_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. hold and carry\nC. contact\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_5722_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. transfer\nC. approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_5946_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. approach\nC. release\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. hold and carry" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6036_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. contact\nC. hold and carry\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
transfer" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6163_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. transfer\nC. contact\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6301_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. pre-approach\nC. transfer\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. release" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6337_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. contact\nC. release\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6434_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. hold and carry\nC. approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. hold and carry" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6533_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. transfer\nC. release\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. transfer" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6606_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. contact\nC. transfer\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6716_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. contact\nC. approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. release" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6776_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. pre-approach\nC. release\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6848_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. release\nC. approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
hold and carry" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6921_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. contact\nC. hold and carry\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. transfer" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6998_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. hold and carry\nC. transfer\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7082_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. pre-approach\nC. transfer\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
release" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7112_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. release\nC. hold and carry\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7190_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. pre-approach\nC. contact\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7292_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. hold and carry\nC. pre-approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
hold and carry" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7338_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. pre-approach\nC. release\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. transfer" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7414_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. hold and carry\nC. contact\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7491_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. release\nC. pre-approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
release" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7513_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. contact\nC. release\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7585_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. contact\nC. hold and carry\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. hold and carry" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7659_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. pre-approach\nC. release\nD. hold and carry\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "A. transfer" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7729_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. pre-approach\nC. transfer\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7855_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. pre-approach\nC. approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. release" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7929_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. approach\nC. contact\nD. release\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "C. contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_8028_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. hold and carry\nC. approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. hold and carry" + } + ], + "images": [ + "images/2025-01-13-17-57-45_8128_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. contact\nC. pre-approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. transfer" + } + ], + "images": [ + "images/2025-01-13-17-57-45_8235_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. contact\nC. transfer\nD. pre-approach\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "B. contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_8521_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. release\nC. contact\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. release" + } + ], + "images": [ + "images/2025-01-13-17-57-45_8719_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. contact\nC. hold and carry\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_8809_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. hold and carry\nC. pre-approach\nD. contact\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "B. hold and carry" + } + ], + "images": [ + "images/2025-01-13-17-57-45_8898_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. transfer\nC. pre-approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. transfer" + } + ], + "images": [ + "images/2025-01-13-17-57-45_9009_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. transfer\nC. contact\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_9154_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. approach\nC. transfer\nD. hold and carry\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "A. release" + } + ], + "images": [ + "images/2025-01-13-17-57-45_9206_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. approach\nC. transfer\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_9307_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. hold and carry\nC. contact\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. hold and carry" + } + ], + "images": [ + "images/2025-01-13-17-57-45_9406_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. release\nC. approach\nD. transfer\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "D. transfer" + } + ], + "images": [ + "images/2025-01-13-17-57-45_9496_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. contact\nC. pre-approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_9777_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. transfer\nC. contact\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. transfer" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10053_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. transfer\nC. contact\nD. hold and carry\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "C. contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10292_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. release\nC. transfer\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. release" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10466_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. contact\nC. release\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10556_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. approach\nC. contact\nD. hold and carry\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "D. hold and carry" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10649_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. pre-approach\nC. transfer\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. transfer" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10707_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. approach\nC. contact\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. release" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10762_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. transfer\nC. contact\nD. release\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "C. contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10829_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. release\nC. pre-approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. hold and carry" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10894_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. approach\nC. contact\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. transfer" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10999_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. pre-approach\nC. hold and carry\nD. contact\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "D. contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_11130_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. contact\nC. hold and carry\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_11295_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. approach\nC. pre-approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_11524_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. hold and carry\nC. release\nD. approach\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "B. hold and carry" + } + ], + "images": [ + "images/2025-01-13-17-57-45_11624_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. release\nC. contact\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. transfer" + } + ], + "images": [ + "images/2025-01-13-17-57-45_11731_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. contact\nC. transfer\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_11909_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. pre-approach\nC. approach\nD. transfer\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "A. contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_12064_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. release\nC. transfer\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. release" + } + ], + "images": [ + "images/2025-01-13-17-57-45_12154_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. pre-approach\nC. contact\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_12256_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. release\nC. hold and carry\nD. pre-approach\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "C. hold and carry" + } + ], + "images": [ + "images/2025-01-13-17-57-45_12358_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. transfer\nC. approach\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. transfer" + } + ], + "images": [ + "images/2025-01-13-17-57-45_12425_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. approach\nC. release\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_12533_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. release\nC. pre-approach\nD. transfer\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "D. transfer" + } + ], + "images": [ + "images/2025-01-13-17-57-45_12641_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. approach\nC. contact\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. release" + } + ], + "images": [ + "images/2025-01-13-17-57-45_12701_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. hold and carry\nC. pre-approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_12781_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. contact\nC. release\nD. pre-approach\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "A. hold and carry" + } + ], + "images": [ + "images/2025-01-13-17-57-45_12863_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. hold and carry\nC. pre-approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. transfer" + } + ], + "images": [ + "images/2025-01-13-17-57-45_12927_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. contact\nC. hold and carry\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_13016_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. hold and carry\nC. approach\nD. 
release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. release" + } + ], + "images": [ + "images/2025-01-13-17-57-45_13061_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. pre-approach\nC. release\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. pre-approach" + } + ], + "images": [ + "images/2025-01-13-17-57-45_13094_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_36_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. 
No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_179_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_333_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_392_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. 
Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_466_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_607_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_695_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. 
No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_851_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_1315_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_1684_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. 
No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_1916_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_2107_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_2255_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. 
Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_2394_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_2484_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_2605_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. 
No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_2810_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_2996_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_3073_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. 
No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_3146_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_3254_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_3434_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. 
No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_3533_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_3617_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_3695_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. 
No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_3813_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_3996_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_4077_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. 
No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_4181_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_4281_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_4361_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. 
Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_4476_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_4564_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_4684_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. 
No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_4766_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_4851_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_5027_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. 
Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_5134_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_5210_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_5281_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. 
No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_5375_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_5522_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_5722_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. 
No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_5946_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6036_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6163_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. 
No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6301_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6337_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6434_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. 
Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6533_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6606_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6716_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. 
Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6776_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6848_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6921_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. 
Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6998_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7082_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7112_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. 
No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7190_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7292_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7338_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. 
No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7414_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7491_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7513_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. 
Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7585_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7659_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7729_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. 
No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7855_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7929_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_8028_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. 
No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_8128_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_8235_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_8521_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. 
Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_8719_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_8809_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_8898_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. 
Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_9009_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_9154_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_9206_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. 
Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_9307_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_9406_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_9496_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. 
Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_9777_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10053_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10292_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. 
No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10466_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10556_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10649_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. 
No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10707_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10762_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10829_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. 
Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10894_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10999_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_11130_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. 
Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_11295_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_11524_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_11624_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. 
Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_11731_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_11909_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_12064_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. 
No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_12154_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_12256_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_12358_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. 
No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_12425_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_12533_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_12641_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. 
Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_12701_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_12781_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_12863_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. 
Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_12927_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_13016_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-13-17-57-45_13061_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. 
No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/2025-01-13-17-57-45_13094_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_14_hand.jpg", + "images/2025-01-13-17-57-45_25_hand.jpg", + "images/2025-01-13-17-57-45_36_hand.jpg", + "images/2025-01-13-17-57-45_47_hand.jpg", + "images/2025-01-13-17-57-45_58_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_113_hand.jpg", + "images/2025-01-13-17-57-45_146_hand.jpg", + "images/2025-01-13-17-57-45_179_hand.jpg", + "images/2025-01-13-17-57-45_212_hand.jpg", + "images/2025-01-13-17-57-45_245_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_386_hand.jpg", + "images/2025-01-13-17-57-45_389_hand.jpg", + "images/2025-01-13-17-57-45_392_hand.jpg", + "images/2025-01-13-17-57-45_396_hand.jpg", + "images/2025-01-13-17-57-45_399_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_1117_hand.jpg", + "images/2025-01-13-17-57-45_1216_hand.jpg", + "images/2025-01-13-17-57-45_1315_hand.jpg", + "images/2025-01-13-17-57-45_1414_hand.jpg", + "images/2025-01-13-17-57-45_1513_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_1650_hand.jpg", + "images/2025-01-13-17-57-45_1667_hand.jpg", + "images/2025-01-13-17-57-45_1684_hand.jpg", + "images/2025-01-13-17-57-45_1701_hand.jpg", + "images/2025-01-13-17-57-45_1718_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_1805_hand.jpg", + "images/2025-01-13-17-57-45_1861_hand.jpg", + "images/2025-01-13-17-57-45_1916_hand.jpg", + "images/2025-01-13-17-57-45_1972_hand.jpg", + "images/2025-01-13-17-57-45_2028_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_2171_hand.jpg", + "images/2025-01-13-17-57-45_2213_hand.jpg", + "images/2025-01-13-17-57-45_2256_hand.jpg", + "images/2025-01-13-17-57-45_2298_hand.jpg", + "images/2025-01-13-17-57-45_2340_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_2430_hand.jpg", + "images/2025-01-13-17-57-45_2457_hand.jpg", + "images/2025-01-13-17-57-45_2484_hand.jpg", + "images/2025-01-13-17-57-45_2512_hand.jpg", + "images/2025-01-13-17-57-45_2539_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_2584_hand.jpg", + "images/2025-01-13-17-57-45_2595_hand.jpg", + "images/2025-01-13-17-57-45_2606_hand.jpg", + "images/2025-01-13-17-57-45_2616_hand.jpg", + "images/2025-01-13-17-57-45_2627_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_2703_hand.jpg", + "images/2025-01-13-17-57-45_2756_hand.jpg", + "images/2025-01-13-17-57-45_2810_hand.jpg", + "images/2025-01-13-17-57-45_2864_hand.jpg", + "images/2025-01-13-17-57-45_2917_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_3690_hand.jpg", + "images/2025-01-13-17-57-45_3693_hand.jpg", + "images/2025-01-13-17-57-45_3695_hand.jpg", + "images/2025-01-13-17-57-45_3697_hand.jpg", + "images/2025-01-13-17-57-45_3700_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_3950_hand.jpg", + "images/2025-01-13-17-57-45_3973_hand.jpg", + "images/2025-01-13-17-57-45_3996_hand.jpg", + "images/2025-01-13-17-57-45_4019_hand.jpg", + "images/2025-01-13-17-57-45_4042_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_4278_hand.jpg", + "images/2025-01-13-17-57-45_4280_hand.jpg", + "images/2025-01-13-17-57-45_4282_hand.jpg", + "images/2025-01-13-17-57-45_4283_hand.jpg", + "images/2025-01-13-17-57-45_4285_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_4316_hand.jpg", + "images/2025-01-13-17-57-45_4339_hand.jpg", + "images/2025-01-13-17-57-45_4362_hand.jpg", + "images/2025-01-13-17-57-45_4384_hand.jpg", + "images/2025-01-13-17-57-45_4407_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_4450_hand.jpg", + "images/2025-01-13-17-57-45_4463_hand.jpg", + "images/2025-01-13-17-57-45_4476_hand.jpg", + "images/2025-01-13-17-57-45_4490_hand.jpg", + "images/2025-01-13-17-57-45_4503_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_4637_hand.jpg", + "images/2025-01-13-17-57-45_4661_hand.jpg", + "images/2025-01-13-17-57-45_4684_hand.jpg", + "images/2025-01-13-17-57-45_4707_hand.jpg", + "images/2025-01-13-17-57-45_4731_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_4804_hand.jpg", + "images/2025-01-13-17-57-45_4828_hand.jpg", + "images/2025-01-13-17-57-45_4852_hand.jpg", + "images/2025-01-13-17-57-45_4875_hand.jpg", + "images/2025-01-13-17-57-45_4899_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_4965_hand.jpg", + "images/2025-01-13-17-57-45_4996_hand.jpg", + "images/2025-01-13-17-57-45_5027_hand.jpg", + "images/2025-01-13-17-57-45_5058_hand.jpg", + "images/2025-01-13-17-57-45_5089_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_5169_hand.jpg", + "images/2025-01-13-17-57-45_5190_hand.jpg", + "images/2025-01-13-17-57-45_5210_hand.jpg", + "images/2025-01-13-17-57-45_5231_hand.jpg", + "images/2025-01-13-17-57-45_5252_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_5319_hand.jpg", + "images/2025-01-13-17-57-45_5347_hand.jpg", + "images/2025-01-13-17-57-45_5376_hand.jpg", + "images/2025-01-13-17-57-45_5404_hand.jpg", + "images/2025-01-13-17-57-45_5432_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_5487_hand.jpg", + "images/2025-01-13-17-57-45_5504_hand.jpg", + "images/2025-01-13-17-57-45_5522_hand.jpg", + "images/2025-01-13-17-57-45_5540_hand.jpg", + "images/2025-01-13-17-57-45_5557_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_5632_hand.jpg", + "images/2025-01-13-17-57-45_5677_hand.jpg", + "images/2025-01-13-17-57-45_5722_hand.jpg", + "images/2025-01-13-17-57-45_5767_hand.jpg", + "images/2025-01-13-17-57-45_5812_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_5896_hand.jpg", + "images/2025-01-13-17-57-45_5921_hand.jpg", + "images/2025-01-13-17-57-45_5946_hand.jpg", + "images/2025-01-13-17-57-45_5971_hand.jpg", + "images/2025-01-13-17-57-45_5996_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6030_hand.jpg", + "images/2025-01-13-17-57-45_6033_hand.jpg", + "images/2025-01-13-17-57-45_6036_hand.jpg", + "images/2025-01-13-17-57-45_6040_hand.jpg", + "images/2025-01-13-17-57-45_6043_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6091_hand.jpg", + "images/2025-01-13-17-57-45_6127_hand.jpg", + "images/2025-01-13-17-57-45_6164_hand.jpg", + "images/2025-01-13-17-57-45_6200_hand.jpg", + "images/2025-01-13-17-57-45_6236_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6288_hand.jpg", + "images/2025-01-13-17-57-45_6294_hand.jpg", + "images/2025-01-13-17-57-45_6301_hand.jpg", + "images/2025-01-13-17-57-45_6308_hand.jpg", + "images/2025-01-13-17-57-45_6314_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6382_hand.jpg", + "images/2025-01-13-17-57-45_6408_hand.jpg", + "images/2025-01-13-17-57-45_6434_hand.jpg", + "images/2025-01-13-17-57-45_6461_hand.jpg", + "images/2025-01-13-17-57-45_6487_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6570_hand.jpg", + "images/2025-01-13-17-57-45_6588_hand.jpg", + "images/2025-01-13-17-57-45_6606_hand.jpg", + "images/2025-01-13-17-57-45_6624_hand.jpg", + "images/2025-01-13-17-57-45_6642_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6683_hand.jpg", + "images/2025-01-13-17-57-45_6700_hand.jpg", + "images/2025-01-13-17-57-45_6716_hand.jpg", + "images/2025-01-13-17-57-45_6732_hand.jpg", + "images/2025-01-13-17-57-45_6749_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6808_hand.jpg", + "images/2025-01-13-17-57-45_6828_hand.jpg", + "images/2025-01-13-17-57-45_6848_hand.jpg", + "images/2025-01-13-17-57-45_6868_hand.jpg", + "images/2025-01-13-17-57-45_6888_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6958_hand.jpg", + "images/2025-01-13-17-57-45_6978_hand.jpg", + "images/2025-01-13-17-57-45_6998_hand.jpg", + "images/2025-01-13-17-57-45_7019_hand.jpg", + "images/2025-01-13-17-57-45_7039_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7071_hand.jpg", + "images/2025-01-13-17-57-45_7077_hand.jpg", + "images/2025-01-13-17-57-45_7082_hand.jpg", + "images/2025-01-13-17-57-45_7088_hand.jpg", + "images/2025-01-13-17-57-45_7094_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7106_hand.jpg", + "images/2025-01-13-17-57-45_7109_hand.jpg", + "images/2025-01-13-17-57-45_7112_hand.jpg", + "images/2025-01-13-17-57-45_7116_hand.jpg", + "images/2025-01-13-17-57-45_7119_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7149_hand.jpg", + "images/2025-01-13-17-57-45_7170_hand.jpg", + "images/2025-01-13-17-57-45_7190_hand.jpg", + "images/2025-01-13-17-57-45_7210_hand.jpg", + "images/2025-01-13-17-57-45_7231_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7373_hand.jpg", + "images/2025-01-13-17-57-45_7394_hand.jpg", + "images/2025-01-13-17-57-45_7414_hand.jpg", + "images/2025-01-13-17-57-45_7434_hand.jpg", + "images/2025-01-13-17-57-45_7455_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7483_hand.jpg", + "images/2025-01-13-17-57-45_7487_hand.jpg", + "images/2025-01-13-17-57-45_7492_hand.jpg", + "images/2025-01-13-17-57-45_7496_hand.jpg", + "images/2025-01-13-17-57-45_7500_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7508_hand.jpg", + "images/2025-01-13-17-57-45_7511_hand.jpg", + "images/2025-01-13-17-57-45_7513_hand.jpg", + "images/2025-01-13-17-57-45_7515_hand.jpg", + "images/2025-01-13-17-57-45_7518_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7545_hand.jpg", + "images/2025-01-13-17-57-45_7565_hand.jpg", + "images/2025-01-13-17-57-45_7586_hand.jpg", + "images/2025-01-13-17-57-45_7606_hand.jpg", + "images/2025-01-13-17-57-45_7626_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7691_hand.jpg", + "images/2025-01-13-17-57-45_7710_hand.jpg", + "images/2025-01-13-17-57-45_7730_hand.jpg", + "images/2025-01-13-17-57-45_7749_hand.jpg", + "images/2025-01-13-17-57-45_7768_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7814_hand.jpg", + "images/2025-01-13-17-57-45_7835_hand.jpg", + "images/2025-01-13-17-57-45_7855_hand.jpg", + "images/2025-01-13-17-57-45_7875_hand.jpg", + "images/2025-01-13-17-57-45_7896_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7923_hand.jpg", + "images/2025-01-13-17-57-45_7926_hand.jpg", + "images/2025-01-13-17-57-45_7929_hand.jpg", + "images/2025-01-13-17-57-45_7932_hand.jpg", + "images/2025-01-13-17-57-45_7935_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7973_hand.jpg", + "images/2025-01-13-17-57-45_8001_hand.jpg", + "images/2025-01-13-17-57-45_8028_hand.jpg", + "images/2025-01-13-17-57-45_8056_hand.jpg", + "images/2025-01-13-17-57-45_8084_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_8176_hand.jpg", + "images/2025-01-13-17-57-45_8206_hand.jpg", + "images/2025-01-13-17-57-45_8236_hand.jpg", + "images/2025-01-13-17-57-45_8265_hand.jpg", + "images/2025-01-13-17-57-45_8295_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_8402_hand.jpg", + "images/2025-01-13-17-57-45_8462_hand.jpg", + "images/2025-01-13-17-57-45_8521_hand.jpg", + "images/2025-01-13-17-57-45_8580_hand.jpg", + "images/2025-01-13-17-57-45_8640_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_8714_hand.jpg", + "images/2025-01-13-17-57-45_8717_hand.jpg", + "images/2025-01-13-17-57-45_8719_hand.jpg", + "images/2025-01-13-17-57-45_8721_hand.jpg", + "images/2025-01-13-17-57-45_8724_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_8758_hand.jpg", + "images/2025-01-13-17-57-45_8783_hand.jpg", + "images/2025-01-13-17-57-45_8809_hand.jpg", + "images/2025-01-13-17-57-45_8835_hand.jpg", + "images/2025-01-13-17-57-45_8860_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_8894_hand.jpg", + "images/2025-01-13-17-57-45_8896_hand.jpg", + "images/2025-01-13-17-57-45_8898_hand.jpg", + "images/2025-01-13-17-57-45_8901_hand.jpg", + "images/2025-01-13-17-57-45_8903_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_8945_hand.jpg", + "images/2025-01-13-17-57-45_8977_hand.jpg", + "images/2025-01-13-17-57-45_9009_hand.jpg", + "images/2025-01-13-17-57-45_9041_hand.jpg", + "images/2025-01-13-17-57-45_9073_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_9127_hand.jpg", + "images/2025-01-13-17-57-45_9141_hand.jpg", + "images/2025-01-13-17-57-45_9154_hand.jpg", + "images/2025-01-13-17-57-45_9167_hand.jpg", + "images/2025-01-13-17-57-45_9181_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_9251_hand.jpg", + "images/2025-01-13-17-57-45_9279_hand.jpg", + "images/2025-01-13-17-57-45_9307_hand.jpg", + "images/2025-01-13-17-57-45_9335_hand.jpg", + "images/2025-01-13-17-57-45_9363_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_9447_hand.jpg", + "images/2025-01-13-17-57-45_9472_hand.jpg", + "images/2025-01-13-17-57-45_9496_hand.jpg", + "images/2025-01-13-17-57-45_9520_hand.jpg", + "images/2025-01-13-17-57-45_9545_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_9650_hand.jpg", + "images/2025-01-13-17-57-45_9714_hand.jpg", + "images/2025-01-13-17-57-45_9778_hand.jpg", + "images/2025-01-13-17-57-45_9841_hand.jpg", + "images/2025-01-13-17-57-45_9905_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10188_hand.jpg", + "images/2025-01-13-17-57-45_10240_hand.jpg", + "images/2025-01-13-17-57-45_10292_hand.jpg", + "images/2025-01-13-17-57-45_10344_hand.jpg", + "images/2025-01-13-17-57-45_10396_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10462_hand.jpg", + "images/2025-01-13-17-57-45_10464_hand.jpg", + "images/2025-01-13-17-57-45_10466_hand.jpg", + "images/2025-01-13-17-57-45_10468_hand.jpg", + "images/2025-01-13-17-57-45_10470_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10505_hand.jpg", + "images/2025-01-13-17-57-45_10531_hand.jpg", + "images/2025-01-13-17-57-45_10556_hand.jpg", + "images/2025-01-13-17-57-45_10582_hand.jpg", + "images/2025-01-13-17-57-45_10608_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10643_hand.jpg", + "images/2025-01-13-17-57-45_10646_hand.jpg", + "images/2025-01-13-17-57-45_10650_hand.jpg", + "images/2025-01-13-17-57-45_10653_hand.jpg", + "images/2025-01-13-17-57-45_10656_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10757_hand.jpg", + "images/2025-01-13-17-57-45_10760_hand.jpg", + "images/2025-01-13-17-57-45_10762_hand.jpg", + "images/2025-01-13-17-57-45_10764_hand.jpg", + "images/2025-01-13-17-57-45_10767_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_11110_hand.jpg", + "images/2025-01-13-17-57-45_11120_hand.jpg", + "images/2025-01-13-17-57-45_11130_hand.jpg", + "images/2025-01-13-17-57-45_11140_hand.jpg", + "images/2025-01-13-17-57-45_11150_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_11211_hand.jpg", + "images/2025-01-13-17-57-45_11253_hand.jpg", + "images/2025-01-13-17-57-45_11296_hand.jpg", + "images/2025-01-13-17-57-45_11338_hand.jpg", + "images/2025-01-13-17-57-45_11380_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_11667_hand.jpg", + "images/2025-01-13-17-57-45_11699_hand.jpg", + "images/2025-01-13-17-57-45_11732_hand.jpg", + "images/2025-01-13-17-57-45_11764_hand.jpg", + "images/2025-01-13-17-57-45_11796_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_11862_hand.jpg", + "images/2025-01-13-17-57-45_11886_hand.jpg", + "images/2025-01-13-17-57-45_11909_hand.jpg", + "images/2025-01-13-17-57-45_11932_hand.jpg", + "images/2025-01-13-17-57-45_11956_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_12014_hand.jpg", + "images/2025-01-13-17-57-45_12039_hand.jpg", + "images/2025-01-13-17-57-45_12064_hand.jpg", + "images/2025-01-13-17-57-45_12089_hand.jpg", + "images/2025-01-13-17-57-45_12114_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_12391_hand.jpg", + "images/2025-01-13-17-57-45_12408_hand.jpg", + "images/2025-01-13-17-57-45_12425_hand.jpg", + "images/2025-01-13-17-57-45_12442_hand.jpg", + "images/2025-01-13-17-57-45_12459_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_12499_hand.jpg", + "images/2025-01-13-17-57-45_12516_hand.jpg", + "images/2025-01-13-17-57-45_12533_hand.jpg", + "images/2025-01-13-17-57-45_12550_hand.jpg", + "images/2025-01-13-17-57-45_12567_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_12996_hand.jpg", + "images/2025-01-13-17-57-45_13006_hand.jpg", + "images/2025-01-13-17-57-45_13016_hand.jpg", + "images/2025-01-13-17-57-45_13026_hand.jpg", + "images/2025-01-13-17-57-45_13036_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_13053_hand.jpg", + "images/2025-01-13-17-57-45_13057_hand.jpg", + "images/2025-01-13-17-57-45_13061_hand.jpg", + "images/2025-01-13-17-57-45_13065_hand.jpg", + "images/2025-01-13-17-57-45_13069_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this operation will fail" + } + ], + "images": [ + "images/2025-01-13-17-57-45_100_hand.jpg", + "images/2025-01-13-17-57-45_116_hand.jpg", + "images/2025-01-13-17-57-45_131_hand.jpg", + "images/2025-01-13-17-57-45_147_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-17-57-45_300_hand.jpg", + "images/2025-01-13-17-57-45_309_hand.jpg", + "images/2025-01-13-17-57-45_318_hand.jpg", + "images/2025-01-13-17-57-45_326_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-17-57-45_420_hand.jpg", + "images/2025-01-13-17-57-45_430_hand.jpg", + "images/2025-01-13-17-57-45_439_hand.jpg", + "images/2025-01-13-17-57-45_448_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-17-57-45_550_hand.jpg", + "images/2025-01-13-17-57-45_563_hand.jpg", + "images/2025-01-13-17-57-45_576_hand.jpg", + "images/2025-01-13-17-57-45_589_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this operation will fail" + } + ], + "images": [ + "images/2025-01-13-17-57-45_819_hand.jpg", + "images/2025-01-13-17-57-45_888_hand.jpg", + "images/2025-01-13-17-57-45_958_hand.jpg", + "images/2025-01-13-17-57-45_1027_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-17-57-45_1691_hand.jpg", + "images/2025-01-13-17-57-45_1728_hand.jpg", + "images/2025-01-13-17-57-45_1765_hand.jpg", + "images/2025-01-13-17-57-45_1801_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-17-57-45_2155_hand.jpg", + "images/2025-01-13-17-57-45_2176_hand.jpg", + "images/2025-01-13-17-57-45_2197_hand.jpg", + "images/2025-01-13-17-57-45_2217_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-17-57-45_2474_hand.jpg", + "images/2025-01-13-17-57-45_2520_hand.jpg", + "images/2025-01-13-17-57-45_2566_hand.jpg", + "images/2025-01-13-17-57-45_2612_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-17-57-45_3031_hand.jpg", + "images/2025-01-13-17-57-45_3042_hand.jpg", + "images/2025-01-13-17-57-45_3053_hand.jpg", + "images/2025-01-13-17-57-45_3064_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — this operation will fail" + } + ], + "images": [ + "images/2025-01-13-17-57-45_3184_hand.jpg", + "images/2025-01-13-17-57-45_3198_hand.jpg", + "images/2025-01-13-17-57-45_3212_hand.jpg", + "images/2025-01-13-17-57-45_3226_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-17-57-45_3374_hand.jpg", + "images/2025-01-13-17-57-45_3388_hand.jpg", + "images/2025-01-13-17-57-45_3403_hand.jpg", + "images/2025-01-13-17-57-45_3418_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-17-57-45_3568_hand.jpg", + "images/2025-01-13-17-57-45_3579_hand.jpg", + "images/2025-01-13-17-57-45_3591_hand.jpg", + "images/2025-01-13-17-57-45_3602_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-17-57-45_3751_hand.jpg", + "images/2025-01-13-17-57-45_3780_hand.jpg", + "images/2025-01-13-17-57-45_3808_hand.jpg", + "images/2025-01-13-17-57-45_3837_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-17-57-45_4112_hand.jpg", + "images/2025-01-13-17-57-45_4127_hand.jpg", + "images/2025-01-13-17-57-45_4142_hand.jpg", + "images/2025-01-13-17-57-45_4157_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-17-57-45_4329_hand.jpg", + "images/2025-01-13-17-57-45_4353_hand.jpg", + "images/2025-01-13-17-57-45_4377_hand.jpg", + "images/2025-01-13-17-57-45_4401_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-17-57-45_4630_hand.jpg", + "images/2025-01-13-17-57-45_4643_hand.jpg", + "images/2025-01-13-17-57-45_4655_hand.jpg", + "images/2025-01-13-17-57-45_4667_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-17-57-45_4820_hand.jpg", + "images/2025-01-13-17-57-45_4848_hand.jpg", + "images/2025-01-13-17-57-45_4876_hand.jpg", + "images/2025-01-13-17-57-45_4903_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-17-57-45_5161_hand.jpg", + "images/2025-01-13-17-57-45_5172_hand.jpg", + "images/2025-01-13-17-57-45_5182_hand.jpg", + "images/2025-01-13-17-57-45_5193_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-17-57-45_5358_hand.jpg", + "images/2025-01-13-17-57-45_5401_hand.jpg", + "images/2025-01-13-17-57-45_5445_hand.jpg", + "images/2025-01-13-17-57-45_5488_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-17-57-45_5889_hand.jpg", + "images/2025-01-13-17-57-45_5903_hand.jpg", + "images/2025-01-13-17-57-45_5916_hand.jpg", + "images/2025-01-13-17-57-45_5930_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6086_hand.jpg", + "images/2025-01-13-17-57-45_6108_hand.jpg", + "images/2025-01-13-17-57-45_6131_hand.jpg", + "images/2025-01-13-17-57-45_6154_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6376_hand.jpg", + "images/2025-01-13-17-57-45_6391_hand.jpg", + "images/2025-01-13-17-57-45_6406_hand.jpg", + "images/2025-01-13-17-57-45_6421_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6578_hand.jpg", + "images/2025-01-13-17-57-45_6596_hand.jpg", + "images/2025-01-13-17-57-45_6613_hand.jpg", + "images/2025-01-13-17-57-45_6631_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6803_hand.jpg", + "images/2025-01-13-17-57-45_6814_hand.jpg", + "images/2025-01-13-17-57-45_6825_hand.jpg", + "images/2025-01-13-17-57-45_6836_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6958_hand.jpg", + "images/2025-01-13-17-57-45_6972_hand.jpg", + "images/2025-01-13-17-57-45_6986_hand.jpg", + "images/2025-01-13-17-57-45_7000_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
No — this operation will fail" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7141_hand.jpg", + "images/2025-01-13-17-57-45_7151_hand.jpg", + "images/2025-01-13-17-57-45_7161_hand.jpg", + "images/2025-01-13-17-57-45_7170_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7268_hand.jpg", + "images/2025-01-13-17-57-45_7275_hand.jpg", + "images/2025-01-13-17-57-45_7282_hand.jpg", + "images/2025-01-13-17-57-45_7289_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7371_hand.jpg", + "images/2025-01-13-17-57-45_7384_hand.jpg", + "images/2025-01-13-17-57-45_7397_hand.jpg", + "images/2025-01-13-17-57-45_7410_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7540_hand.jpg", + "images/2025-01-13-17-57-45_7551_hand.jpg", + "images/2025-01-13-17-57-45_7562_hand.jpg", + "images/2025-01-13-17-57-45_7573_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7702_hand.jpg", + "images/2025-01-13-17-57-45_7723_hand.jpg", + "images/2025-01-13-17-57-45_7743_hand.jpg", + "images/2025-01-13-17-57-45_7763_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7965_hand.jpg", + "images/2025-01-13-17-57-45_7980_hand.jpg", + "images/2025-01-13-17-57-45_7995_hand.jpg", + "images/2025-01-13-17-57-45_8010_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-17-57-45_8213_hand.jpg", + "images/2025-01-13-17-57-45_8257_hand.jpg", + "images/2025-01-13-17-57-45_8301_hand.jpg", + "images/2025-01-13-17-57-45_8345_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-17-57-45_8750_hand.jpg", + "images/2025-01-13-17-57-45_8763_hand.jpg", + "images/2025-01-13-17-57-45_8777_hand.jpg", + "images/2025-01-13-17-57-45_8790_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-17-57-45_8945_hand.jpg", + "images/2025-01-13-17-57-45_8968_hand.jpg", + "images/2025-01-13-17-57-45_8992_hand.jpg", + "images/2025-01-13-17-57-45_9015_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-17-57-45_9243_hand.jpg", + "images/2025-01-13-17-57-45_9257_hand.jpg", + "images/2025-01-13-17-57-45_9272_hand.jpg", + "images/2025-01-13-17-57-45_9287_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — this operation will fail" + } + ], + "images": [ + "images/2025-01-13-17-57-45_9488_hand.jpg", + "images/2025-01-13-17-57-45_9530_hand.jpg", + "images/2025-01-13-17-57-45_9572_hand.jpg", + "images/2025-01-13-17-57-45_9615_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10042_hand.jpg", + "images/2025-01-13-17-57-45_10079_hand.jpg", + "images/2025-01-13-17-57-45_10116_hand.jpg", + "images/2025-01-13-17-57-45_10153_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10498_hand.jpg", + "images/2025-01-13-17-57-45_10512_hand.jpg", + "images/2025-01-13-17-57-45_10526_hand.jpg", + "images/2025-01-13-17-57-45_10540_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10674_hand.jpg", + "images/2025-01-13-17-57-45_10683_hand.jpg", + "images/2025-01-13-17-57-45_10691_hand.jpg", + "images/2025-01-13-17-57-45_10699_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10787_hand.jpg", + "images/2025-01-13-17-57-45_10797_hand.jpg", + "images/2025-01-13-17-57-45_10806_hand.jpg", + "images/2025-01-13-17-57-45_10816_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10966_hand.jpg", + "images/2025-01-13-17-57-45_11005_hand.jpg", + "images/2025-01-13-17-57-45_11045_hand.jpg", + "images/2025-01-13-17-57-45_11085_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-17-57-45_11455_hand.jpg", + "images/2025-01-13-17-57-45_11470_hand.jpg", + "images/2025-01-13-17-57-45_11485_hand.jpg", + "images/2025-01-13-17-57-45_11500_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-17-57-45_11696_hand.jpg", + "images/2025-01-13-17-57-45_11736_hand.jpg", + "images/2025-01-13-17-57-45_11777_hand.jpg", + "images/2025-01-13-17-57-45_11817_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-17-57-45_12191_hand.jpg", + "images/2025-01-13-17-57-45_12207_hand.jpg", + "images/2025-01-13-17-57-45_12222_hand.jpg", + "images/2025-01-13-17-57-45_12237_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this operation will fail" + } + ], + "images": [ + "images/2025-01-13-17-57-45_12398_hand.jpg", + "images/2025-01-13-17-57-45_12415_hand.jpg", + "images/2025-01-13-17-57-45_12431_hand.jpg", + "images/2025-01-13-17-57-45_12447_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-17-57-45_12604_hand.jpg", + "images/2025-01-13-17-57-45_12613_hand.jpg", + "images/2025-01-13-17-57-45_12622_hand.jpg", + "images/2025-01-13-17-57-45_12631_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-17-57-45_12729_hand.jpg", + "images/2025-01-13-17-57-45_12741_hand.jpg", + "images/2025-01-13-17-57-45_12754_hand.jpg", + "images/2025-01-13-17-57-45_12766_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-13-17-57-45_12897_hand.jpg", + "images/2025-01-13-17-57-45_12912_hand.jpg", + "images/2025-01-13-17-57-45_12927_hand.jpg", + "images/2025-01-13-17-57-45_12942_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/2025-01-13-17-57-45_615_hand.jpg", + "images/2025-01-13-17-57-45_344_hand.jpg", + "images/2025-01-13-17-57-45_466_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/2025-01-13-17-57-45_615_hand.jpg", + "images/2025-01-13-17-57-45_466_hand.jpg", + "images/2025-01-13-17-57-45_1875_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/2025-01-13-17-57-45_1875_hand.jpg", + "images/2025-01-13-17-57-45_2259_hand.jpg", + "images/2025-01-13-17-57-45_615_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/2025-01-13-17-57-45_2704_hand.jpg", + "images/2025-01-13-17-57-45_2259_hand.jpg", + "images/2025-01-13-17-57-45_1875_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/2025-01-13-17-57-45_2704_hand.jpg", + "images/2025-01-13-17-57-45_3085_hand.jpg", + "images/2025-01-13-17-57-45_2259_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/2025-01-13-17-57-45_2704_hand.jpg", + "images/2025-01-13-17-57-45_3448_hand.jpg", + "images/2025-01-13-17-57-45_3085_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/2025-01-13-17-57-45_3625_hand.jpg", + "images/2025-01-13-17-57-45_3448_hand.jpg", + "images/2025-01-13-17-57-45_3085_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/2025-01-13-17-57-45_3448_hand.jpg", + "images/2025-01-13-17-57-45_3625_hand.jpg", + "images/2025-01-13-17-57-45_3894_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/2025-01-13-17-57-45_3894_hand.jpg", + "images/2025-01-13-17-57-45_4187_hand.jpg", + "images/2025-01-13-17-57-45_3625_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/2025-01-13-17-57-45_3894_hand.jpg", + "images/2025-01-13-17-57-45_4449_hand.jpg", + "images/2025-01-13-17-57-45_4187_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/2025-01-13-17-57-45_4449_hand.jpg", + "images/2025-01-13-17-57-45_4691_hand.jpg", + "images/2025-01-13-17-57-45_4187_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/2025-01-13-17-57-45_4958_hand.jpg", + "images/2025-01-13-17-57-45_4449_hand.jpg", + "images/2025-01-13-17-57-45_4691_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/2025-01-13-17-57-45_4958_hand.jpg", + "images/2025-01-13-17-57-45_4691_hand.jpg", + "images/2025-01-13-17-57-45_5214_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/2025-01-13-17-57-45_5214_hand.jpg", + "images/2025-01-13-17-57-45_5575_hand.jpg", + "images/2025-01-13-17-57-45_4958_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/2025-01-13-17-57-45_5957_hand.jpg", + "images/2025-01-13-17-57-45_5214_hand.jpg", + "images/2025-01-13-17-57-45_5575_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/2025-01-13-17-57-45_5575_hand.jpg", + "images/2025-01-13-17-57-45_5957_hand.jpg", + "images/2025-01-13-17-57-45_6199_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6199_hand.jpg", + "images/2025-01-13-17-57-45_5957_hand.jpg", + "images/2025-01-13-17-57-45_6450_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6666_hand.jpg", + "images/2025-01-13-17-57-45_6450_hand.jpg", + "images/2025-01-13-17-57-45_6199_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6666_hand.jpg", + "images/2025-01-13-17-57-45_6450_hand.jpg", + "images/2025-01-13-17-57-45_6858_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6858_hand.jpg", + "images/2025-01-13-17-57-45_7028_hand.jpg", + "images/2025-01-13-17-57-45_6666_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6858_hand.jpg", + "images/2025-01-13-17-57-45_7302_hand.jpg", + "images/2025-01-13-17-57-45_7028_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7028_hand.jpg", + "images/2025-01-13-17-57-45_7435_hand.jpg", + "images/2025-01-13-17-57-45_7302_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7435_hand.jpg", + "images/2025-01-13-17-57-45_7302_hand.jpg", + "images/2025-01-13-17-57-45_7595_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7803_hand.jpg", + "images/2025-01-13-17-57-45_7435_hand.jpg", + "images/2025-01-13-17-57-45_7595_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/2025-01-13-17-57-45_8039_hand.jpg", + "images/2025-01-13-17-57-45_7595_hand.jpg", + "images/2025-01-13-17-57-45_7803_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7803_hand.jpg", + "images/2025-01-13-17-57-45_8039_hand.jpg", + "images/2025-01-13-17-57-45_8433_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/2025-01-13-17-57-45_8817_hand.jpg", + "images/2025-01-13-17-57-45_8433_hand.jpg", + "images/2025-01-13-17-57-45_8039_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/2025-01-13-17-57-45_9061_hand.jpg", + "images/2025-01-13-17-57-45_8433_hand.jpg", + "images/2025-01-13-17-57-45_8817_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/2025-01-13-17-57-45_9317_hand.jpg", + "images/2025-01-13-17-57-45_9061_hand.jpg", + "images/2025-01-13-17-57-45_8817_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/2025-01-13-17-57-45_9061_hand.jpg", + "images/2025-01-13-17-57-45_9317_hand.jpg", + "images/2025-01-13-17-57-45_10227_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10227_hand.jpg", + "images/2025-01-13-17-57-45_10567_hand.jpg", + "images/2025-01-13-17-57-45_9317_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10715_hand.jpg", + "images/2025-01-13-17-57-45_10567_hand.jpg", + "images/2025-01-13-17-57-45_10227_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10835_hand.jpg", + "images/2025-01-13-17-57-45_10567_hand.jpg", + "images/2025-01-13-17-57-45_10715_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10715_hand.jpg", + "images/2025-01-13-17-57-45_11164_hand.jpg", + "images/2025-01-13-17-57-45_10835_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/2025-01-13-17-57-45_11164_hand.jpg", + "images/2025-01-13-17-57-45_10835_hand.jpg", + "images/2025-01-13-17-57-45_11529_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/2025-01-13-17-57-45_11529_hand.jpg", + "images/2025-01-13-17-57-45_11164_hand.jpg", + "images/2025-01-13-17-57-45_11897_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/2025-01-13-17-57-45_11529_hand.jpg", + "images/2025-01-13-17-57-45_11897_hand.jpg", + "images/2025-01-13-17-57-45_12268_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/2025-01-13-17-57-45_12268_hand.jpg", + "images/2025-01-13-17-57-45_12648_hand.jpg", + "images/2025-01-13-17-57-45_11897_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/2025-01-13-17-57-45_12790_hand.jpg", + "images/2025-01-13-17-57-45_12648_hand.jpg", + "images/2025-01-13-17-57-45_12268_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/2025-01-13-17-57-45_12648_hand.jpg", + "images/2025-01-13-17-57-45_12790_hand.jpg", + "images/2025-01-13-17-57-45_12972_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_344_hand.jpg", + "images/2025-01-13-17-57-45_466_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_615_hand.jpg", + "images/2025-01-13-17-57-45_466_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_615_hand.jpg", + "images/2025-01-13-17-57-45_1875_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_2259_hand.jpg", + "images/2025-01-13-17-57-45_1875_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_2704_hand.jpg", + "images/2025-01-13-17-57-45_2259_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_2704_hand.jpg", + "images/2025-01-13-17-57-45_3085_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_3448_hand.jpg", + "images/2025-01-13-17-57-45_3085_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_3625_hand.jpg", + "images/2025-01-13-17-57-45_3448_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_3625_hand.jpg", + "images/2025-01-13-17-57-45_3894_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_4187_hand.jpg", + "images/2025-01-13-17-57-45_3894_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_4449_hand.jpg", + "images/2025-01-13-17-57-45_4187_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_4449_hand.jpg", + "images/2025-01-13-17-57-45_4691_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_4691_hand.jpg", + "images/2025-01-13-17-57-45_4958_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_5214_hand.jpg", + "images/2025-01-13-17-57-45_4958_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_5575_hand.jpg", + "images/2025-01-13-17-57-45_5214_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_5957_hand.jpg", + "images/2025-01-13-17-57-45_5575_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6199_hand.jpg", + "images/2025-01-13-17-57-45_5957_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6450_hand.jpg", + "images/2025-01-13-17-57-45_6199_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6666_hand.jpg", + "images/2025-01-13-17-57-45_6450_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6858_hand.jpg", + "images/2025-01-13-17-57-45_6666_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6858_hand.jpg", + "images/2025-01-13-17-57-45_7028_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7028_hand.jpg", + "images/2025-01-13-17-57-45_7302_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7302_hand.jpg", + "images/2025-01-13-17-57-45_7435_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7595_hand.jpg", + "images/2025-01-13-17-57-45_7435_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7595_hand.jpg", + "images/2025-01-13-17-57-45_7803_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7803_hand.jpg", + "images/2025-01-13-17-57-45_8039_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_8039_hand.jpg", + "images/2025-01-13-17-57-45_8433_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_8817_hand.jpg", + "images/2025-01-13-17-57-45_8433_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_8817_hand.jpg", + "images/2025-01-13-17-57-45_9061_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_9317_hand.jpg", + "images/2025-01-13-17-57-45_9061_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10227_hand.jpg", + "images/2025-01-13-17-57-45_9317_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10567_hand.jpg", + "images/2025-01-13-17-57-45_10227_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10567_hand.jpg", + "images/2025-01-13-17-57-45_10715_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10835_hand.jpg", + "images/2025-01-13-17-57-45_10715_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10835_hand.jpg", + "images/2025-01-13-17-57-45_11164_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_11164_hand.jpg", + "images/2025-01-13-17-57-45_11529_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_11897_hand.jpg", + "images/2025-01-13-17-57-45_11529_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_11897_hand.jpg", + "images/2025-01-13-17-57-45_12268_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_12268_hand.jpg", + "images/2025-01-13-17-57-45_12648_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_12790_hand.jpg", + "images/2025-01-13-17-57-45_12648_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_12972_hand.jpg", + "images/2025-01-13-17-57-45_12790_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_615_hand.jpg", + "images/2025-01-13-17-57-45_344_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_466_hand.jpg", + "images/2025-01-13-17-57-45_1875_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_615_hand.jpg", + "images/2025-01-13-17-57-45_2259_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_1875_hand.jpg", + "images/2025-01-13-17-57-45_2704_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_3085_hand.jpg", + "images/2025-01-13-17-57-45_2259_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_3448_hand.jpg", + "images/2025-01-13-17-57-45_2704_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_3085_hand.jpg", + "images/2025-01-13-17-57-45_3625_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_3448_hand.jpg", + "images/2025-01-13-17-57-45_3894_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_3625_hand.jpg", + "images/2025-01-13-17-57-45_4187_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_3894_hand.jpg", + "images/2025-01-13-17-57-45_4449_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_4691_hand.jpg", + "images/2025-01-13-17-57-45_4187_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_4958_hand.jpg", + "images/2025-01-13-17-57-45_4449_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_5214_hand.jpg", + "images/2025-01-13-17-57-45_4691_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_5575_hand.jpg", + "images/2025-01-13-17-57-45_4958_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_5214_hand.jpg", + "images/2025-01-13-17-57-45_5957_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_5575_hand.jpg", + "images/2025-01-13-17-57-45_6199_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_5957_hand.jpg", + "images/2025-01-13-17-57-45_6450_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6199_hand.jpg", + "images/2025-01-13-17-57-45_6666_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6450_hand.jpg", + "images/2025-01-13-17-57-45_6858_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6666_hand.jpg", + "images/2025-01-13-17-57-45_7028_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7302_hand.jpg", + "images/2025-01-13-17-57-45_6858_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7028_hand.jpg", + "images/2025-01-13-17-57-45_7435_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7595_hand.jpg", + "images/2025-01-13-17-57-45_7302_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7803_hand.jpg", + "images/2025-01-13-17-57-45_7435_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_8039_hand.jpg", + "images/2025-01-13-17-57-45_7595_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_8433_hand.jpg", + "images/2025-01-13-17-57-45_7803_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_8817_hand.jpg", + "images/2025-01-13-17-57-45_8039_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_8433_hand.jpg", + "images/2025-01-13-17-57-45_9061_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_9317_hand.jpg", + "images/2025-01-13-17-57-45_8817_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10227_hand.jpg", + "images/2025-01-13-17-57-45_9061_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_9317_hand.jpg", + "images/2025-01-13-17-57-45_10567_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10227_hand.jpg", + "images/2025-01-13-17-57-45_10715_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10835_hand.jpg", + "images/2025-01-13-17-57-45_10567_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_11164_hand.jpg", + "images/2025-01-13-17-57-45_10715_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10835_hand.jpg", + "images/2025-01-13-17-57-45_11529_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_11164_hand.jpg", + "images/2025-01-13-17-57-45_11897_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_11529_hand.jpg", + "images/2025-01-13-17-57-45_12268_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_11897_hand.jpg", + "images/2025-01-13-17-57-45_12648_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_12268_hand.jpg", + "images/2025-01-13-17-57-45_12790_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-13-17-57-45_12648_hand.jpg", + "images/2025-01-13-17-57-45_12972_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_318_hand.jpg", + "images/2025-01-13-17-57-45_321_hand.jpg", + "images/2025-01-13-17-57-45_324_hand.jpg", + "images/2025-01-13-17-57-45_327_hand.jpg", + "images/2025-01-13-17-57-45_330_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_338_hand.jpg", + "images/2025-01-13-17-57-45_341_hand.jpg", + "images/2025-01-13-17-57-45_344_hand.jpg", + "images/2025-01-13-17-57-45_347_hand.jpg", + "images/2025-01-13-17-57-45_350_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_358_hand.jpg", + "images/2025-01-13-17-57-45_361_hand.jpg", + "images/2025-01-13-17-57-45_364_hand.jpg", + "images/2025-01-13-17-57-45_367_hand.jpg", + "images/2025-01-13-17-57-45_370_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_438_hand.jpg", + "images/2025-01-13-17-57-45_441_hand.jpg", + "images/2025-01-13-17-57-45_444_hand.jpg", + "images/2025-01-13-17-57-45_447_hand.jpg", + "images/2025-01-13-17-57-45_450_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_460_hand.jpg", + "images/2025-01-13-17-57-45_463_hand.jpg", + "images/2025-01-13-17-57-45_466_hand.jpg", + "images/2025-01-13-17-57-45_469_hand.jpg", + "images/2025-01-13-17-57-45_472_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_482_hand.jpg", + "images/2025-01-13-17-57-45_485_hand.jpg", + "images/2025-01-13-17-57-45_488_hand.jpg", + "images/2025-01-13-17-57-45_491_hand.jpg", + "images/2025-01-13-17-57-45_494_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_579_hand.jpg", + "images/2025-01-13-17-57-45_582_hand.jpg", + "images/2025-01-13-17-57-45_585_hand.jpg", + "images/2025-01-13-17-57-45_588_hand.jpg", + "images/2025-01-13-17-57-45_591_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_610_hand.jpg", + "images/2025-01-13-17-57-45_613_hand.jpg", + "images/2025-01-13-17-57-45_616_hand.jpg", + "images/2025-01-13-17-57-45_619_hand.jpg", + "images/2025-01-13-17-57-45_622_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_640_hand.jpg", + "images/2025-01-13-17-57-45_643_hand.jpg", + "images/2025-01-13-17-57-45_646_hand.jpg", + "images/2025-01-13-17-57-45_649_hand.jpg", + "images/2025-01-13-17-57-45_652_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_1784_hand.jpg", + "images/2025-01-13-17-57-45_1787_hand.jpg", + "images/2025-01-13-17-57-45_1790_hand.jpg", + "images/2025-01-13-17-57-45_1793_hand.jpg", + "images/2025-01-13-17-57-45_1796_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_1869_hand.jpg", + "images/2025-01-13-17-57-45_1872_hand.jpg", + "images/2025-01-13-17-57-45_1875_hand.jpg", + "images/2025-01-13-17-57-45_1878_hand.jpg", + "images/2025-01-13-17-57-45_1881_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_1954_hand.jpg", + "images/2025-01-13-17-57-45_1957_hand.jpg", + "images/2025-01-13-17-57-45_1960_hand.jpg", + "images/2025-01-13-17-57-45_1963_hand.jpg", + "images/2025-01-13-17-57-45_1966_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_2205_hand.jpg", + "images/2025-01-13-17-57-45_2208_hand.jpg", + "images/2025-01-13-17-57-45_2211_hand.jpg", + "images/2025-01-13-17-57-45_2214_hand.jpg", + "images/2025-01-13-17-57-45_2217_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_2253_hand.jpg", + "images/2025-01-13-17-57-45_2256_hand.jpg", + "images/2025-01-13-17-57-45_2259_hand.jpg", + "images/2025-01-13-17-57-45_2262_hand.jpg", + "images/2025-01-13-17-57-45_2265_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_2301_hand.jpg", + "images/2025-01-13-17-57-45_2304_hand.jpg", + "images/2025-01-13-17-57-45_2307_hand.jpg", + "images/2025-01-13-17-57-45_2310_hand.jpg", + "images/2025-01-13-17-57-45_2313_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_2591_hand.jpg", + "images/2025-01-13-17-57-45_2594_hand.jpg", + "images/2025-01-13-17-57-45_2597_hand.jpg", + "images/2025-01-13-17-57-45_2600_hand.jpg", + "images/2025-01-13-17-57-45_2603_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_2698_hand.jpg", + "images/2025-01-13-17-57-45_2701_hand.jpg", + "images/2025-01-13-17-57-45_2704_hand.jpg", + "images/2025-01-13-17-57-45_2707_hand.jpg", + "images/2025-01-13-17-57-45_2710_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_2806_hand.jpg", + "images/2025-01-13-17-57-45_2809_hand.jpg", + "images/2025-01-13-17-57-45_2812_hand.jpg", + "images/2025-01-13-17-57-45_2815_hand.jpg", + "images/2025-01-13-17-57-45_2818_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_3054_hand.jpg", + "images/2025-01-13-17-57-45_3057_hand.jpg", + "images/2025-01-13-17-57-45_3060_hand.jpg", + "images/2025-01-13-17-57-45_3063_hand.jpg", + "images/2025-01-13-17-57-45_3066_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_3080_hand.jpg", + "images/2025-01-13-17-57-45_3083_hand.jpg", + "images/2025-01-13-17-57-45_3086_hand.jpg", + "images/2025-01-13-17-57-45_3089_hand.jpg", + "images/2025-01-13-17-57-45_3092_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_3105_hand.jpg", + "images/2025-01-13-17-57-45_3108_hand.jpg", + "images/2025-01-13-17-57-45_3111_hand.jpg", + "images/2025-01-13-17-57-45_3114_hand.jpg", + "images/2025-01-13-17-57-45_3117_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_3408_hand.jpg", + "images/2025-01-13-17-57-45_3411_hand.jpg", + "images/2025-01-13-17-57-45_3414_hand.jpg", + "images/2025-01-13-17-57-45_3417_hand.jpg", + "images/2025-01-13-17-57-45_3420_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_3442_hand.jpg", + "images/2025-01-13-17-57-45_3445_hand.jpg", + "images/2025-01-13-17-57-45_3448_hand.jpg", + "images/2025-01-13-17-57-45_3451_hand.jpg", + "images/2025-01-13-17-57-45_3454_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_3476_hand.jpg", + "images/2025-01-13-17-57-45_3479_hand.jpg", + "images/2025-01-13-17-57-45_3482_hand.jpg", + "images/2025-01-13-17-57-45_3485_hand.jpg", + "images/2025-01-13-17-57-45_3488_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_3592_hand.jpg", + "images/2025-01-13-17-57-45_3595_hand.jpg", + "images/2025-01-13-17-57-45_3598_hand.jpg", + "images/2025-01-13-17-57-45_3601_hand.jpg", + "images/2025-01-13-17-57-45_3604_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_3620_hand.jpg", + "images/2025-01-13-17-57-45_3623_hand.jpg", + "images/2025-01-13-17-57-45_3626_hand.jpg", + "images/2025-01-13-17-57-45_3629_hand.jpg", + "images/2025-01-13-17-57-45_3632_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_3647_hand.jpg", + "images/2025-01-13-17-57-45_3650_hand.jpg", + "images/2025-01-13-17-57-45_3653_hand.jpg", + "images/2025-01-13-17-57-45_3656_hand.jpg", + "images/2025-01-13-17-57-45_3659_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_3822_hand.jpg", + "images/2025-01-13-17-57-45_3825_hand.jpg", + "images/2025-01-13-17-57-45_3828_hand.jpg", + "images/2025-01-13-17-57-45_3831_hand.jpg", + "images/2025-01-13-17-57-45_3834_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_3888_hand.jpg", + "images/2025-01-13-17-57-45_3891_hand.jpg", + "images/2025-01-13-17-57-45_3894_hand.jpg", + "images/2025-01-13-17-57-45_3897_hand.jpg", + "images/2025-01-13-17-57-45_3900_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_3955_hand.jpg", + "images/2025-01-13-17-57-45_3958_hand.jpg", + "images/2025-01-13-17-57-45_3961_hand.jpg", + "images/2025-01-13-17-57-45_3964_hand.jpg", + "images/2025-01-13-17-57-45_3967_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_4146_hand.jpg", + "images/2025-01-13-17-57-45_4149_hand.jpg", + "images/2025-01-13-17-57-45_4152_hand.jpg", + "images/2025-01-13-17-57-45_4155_hand.jpg", + "images/2025-01-13-17-57-45_4158_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_4181_hand.jpg", + "images/2025-01-13-17-57-45_4184_hand.jpg", + "images/2025-01-13-17-57-45_4187_hand.jpg", + "images/2025-01-13-17-57-45_4190_hand.jpg", + "images/2025-01-13-17-57-45_4193_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_4216_hand.jpg", + "images/2025-01-13-17-57-45_4219_hand.jpg", + "images/2025-01-13-17-57-45_4222_hand.jpg", + "images/2025-01-13-17-57-45_4225_hand.jpg", + "images/2025-01-13-17-57-45_4228_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_4387_hand.jpg", + "images/2025-01-13-17-57-45_4390_hand.jpg", + "images/2025-01-13-17-57-45_4393_hand.jpg", + "images/2025-01-13-17-57-45_4396_hand.jpg", + "images/2025-01-13-17-57-45_4399_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_4443_hand.jpg", + "images/2025-01-13-17-57-45_4446_hand.jpg", + "images/2025-01-13-17-57-45_4449_hand.jpg", + "images/2025-01-13-17-57-45_4452_hand.jpg", + "images/2025-01-13-17-57-45_4455_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_4499_hand.jpg", + "images/2025-01-13-17-57-45_4502_hand.jpg", + "images/2025-01-13-17-57-45_4505_hand.jpg", + "images/2025-01-13-17-57-45_4508_hand.jpg", + "images/2025-01-13-17-57-45_4511_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_4657_hand.jpg", + "images/2025-01-13-17-57-45_4660_hand.jpg", + "images/2025-01-13-17-57-45_4663_hand.jpg", + "images/2025-01-13-17-57-45_4666_hand.jpg", + "images/2025-01-13-17-57-45_4669_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_4686_hand.jpg", + "images/2025-01-13-17-57-45_4689_hand.jpg", + "images/2025-01-13-17-57-45_4692_hand.jpg", + "images/2025-01-13-17-57-45_4695_hand.jpg", + "images/2025-01-13-17-57-45_4698_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_4714_hand.jpg", + "images/2025-01-13-17-57-45_4717_hand.jpg", + "images/2025-01-13-17-57-45_4720_hand.jpg", + "images/2025-01-13-17-57-45_4723_hand.jpg", + "images/2025-01-13-17-57-45_4726_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_4888_hand.jpg", + "images/2025-01-13-17-57-45_4891_hand.jpg", + "images/2025-01-13-17-57-45_4894_hand.jpg", + "images/2025-01-13-17-57-45_4897_hand.jpg", + "images/2025-01-13-17-57-45_4900_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_4952_hand.jpg", + "images/2025-01-13-17-57-45_4955_hand.jpg", + "images/2025-01-13-17-57-45_4958_hand.jpg", + "images/2025-01-13-17-57-45_4961_hand.jpg", + "images/2025-01-13-17-57-45_4964_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_5017_hand.jpg", + "images/2025-01-13-17-57-45_5020_hand.jpg", + "images/2025-01-13-17-57-45_5023_hand.jpg", + "images/2025-01-13-17-57-45_5026_hand.jpg", + "images/2025-01-13-17-57-45_5029_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_5184_hand.jpg", + "images/2025-01-13-17-57-45_5187_hand.jpg", + "images/2025-01-13-17-57-45_5190_hand.jpg", + "images/2025-01-13-17-57-45_5193_hand.jpg", + "images/2025-01-13-17-57-45_5196_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_5208_hand.jpg", + "images/2025-01-13-17-57-45_5211_hand.jpg", + "images/2025-01-13-17-57-45_5214_hand.jpg", + "images/2025-01-13-17-57-45_5217_hand.jpg", + "images/2025-01-13-17-57-45_5220_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_5232_hand.jpg", + "images/2025-01-13-17-57-45_5235_hand.jpg", + "images/2025-01-13-17-57-45_5238_hand.jpg", + "images/2025-01-13-17-57-45_5241_hand.jpg", + "images/2025-01-13-17-57-45_5244_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_5468_hand.jpg", + "images/2025-01-13-17-57-45_5471_hand.jpg", + "images/2025-01-13-17-57-45_5474_hand.jpg", + "images/2025-01-13-17-57-45_5477_hand.jpg", + "images/2025-01-13-17-57-45_5480_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_5570_hand.jpg", + "images/2025-01-13-17-57-45_5573_hand.jpg", + "images/2025-01-13-17-57-45_5576_hand.jpg", + "images/2025-01-13-17-57-45_5579_hand.jpg", + "images/2025-01-13-17-57-45_5582_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_5671_hand.jpg", + "images/2025-01-13-17-57-45_5674_hand.jpg", + "images/2025-01-13-17-57-45_5677_hand.jpg", + "images/2025-01-13-17-57-45_5680_hand.jpg", + "images/2025-01-13-17-57-45_5683_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_5920_hand.jpg", + "images/2025-01-13-17-57-45_5923_hand.jpg", + "images/2025-01-13-17-57-45_5926_hand.jpg", + "images/2025-01-13-17-57-45_5929_hand.jpg", + "images/2025-01-13-17-57-45_5932_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_5951_hand.jpg", + "images/2025-01-13-17-57-45_5954_hand.jpg", + "images/2025-01-13-17-57-45_5957_hand.jpg", + "images/2025-01-13-17-57-45_5960_hand.jpg", + "images/2025-01-13-17-57-45_5963_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_5982_hand.jpg", + "images/2025-01-13-17-57-45_5985_hand.jpg", + "images/2025-01-13-17-57-45_5988_hand.jpg", + "images/2025-01-13-17-57-45_5991_hand.jpg", + "images/2025-01-13-17-57-45_5994_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6141_hand.jpg", + "images/2025-01-13-17-57-45_6144_hand.jpg", + "images/2025-01-13-17-57-45_6147_hand.jpg", + "images/2025-01-13-17-57-45_6150_hand.jpg", + "images/2025-01-13-17-57-45_6153_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6194_hand.jpg", + "images/2025-01-13-17-57-45_6197_hand.jpg", + "images/2025-01-13-17-57-45_6200_hand.jpg", + "images/2025-01-13-17-57-45_6203_hand.jpg", + "images/2025-01-13-17-57-45_6206_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6246_hand.jpg", + "images/2025-01-13-17-57-45_6249_hand.jpg", + "images/2025-01-13-17-57-45_6252_hand.jpg", + "images/2025-01-13-17-57-45_6255_hand.jpg", + "images/2025-01-13-17-57-45_6258_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6410_hand.jpg", + "images/2025-01-13-17-57-45_6413_hand.jpg", + "images/2025-01-13-17-57-45_6416_hand.jpg", + "images/2025-01-13-17-57-45_6419_hand.jpg", + "images/2025-01-13-17-57-45_6422_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6444_hand.jpg", + "images/2025-01-13-17-57-45_6447_hand.jpg", + "images/2025-01-13-17-57-45_6450_hand.jpg", + "images/2025-01-13-17-57-45_6453_hand.jpg", + "images/2025-01-13-17-57-45_6456_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6478_hand.jpg", + "images/2025-01-13-17-57-45_6481_hand.jpg", + "images/2025-01-13-17-57-45_6484_hand.jpg", + "images/2025-01-13-17-57-45_6487_hand.jpg", + "images/2025-01-13-17-57-45_6490_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6619_hand.jpg", + "images/2025-01-13-17-57-45_6622_hand.jpg", + "images/2025-01-13-17-57-45_6625_hand.jpg", + "images/2025-01-13-17-57-45_6628_hand.jpg", + "images/2025-01-13-17-57-45_6631_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6660_hand.jpg", + "images/2025-01-13-17-57-45_6663_hand.jpg", + "images/2025-01-13-17-57-45_6666_hand.jpg", + "images/2025-01-13-17-57-45_6669_hand.jpg", + "images/2025-01-13-17-57-45_6672_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6702_hand.jpg", + "images/2025-01-13-17-57-45_6705_hand.jpg", + "images/2025-01-13-17-57-45_6708_hand.jpg", + "images/2025-01-13-17-57-45_6711_hand.jpg", + "images/2025-01-13-17-57-45_6714_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6826_hand.jpg", + "images/2025-01-13-17-57-45_6829_hand.jpg", + "images/2025-01-13-17-57-45_6832_hand.jpg", + "images/2025-01-13-17-57-45_6835_hand.jpg", + "images/2025-01-13-17-57-45_6838_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6852_hand.jpg", + "images/2025-01-13-17-57-45_6855_hand.jpg", + "images/2025-01-13-17-57-45_6858_hand.jpg", + "images/2025-01-13-17-57-45_6861_hand.jpg", + "images/2025-01-13-17-57-45_6864_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6878_hand.jpg", + "images/2025-01-13-17-57-45_6881_hand.jpg", + "images/2025-01-13-17-57-45_6884_hand.jpg", + "images/2025-01-13-17-57-45_6887_hand.jpg", + "images/2025-01-13-17-57-45_6890_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6989_hand.jpg", + "images/2025-01-13-17-57-45_6992_hand.jpg", + "images/2025-01-13-17-57-45_6995_hand.jpg", + "images/2025-01-13-17-57-45_6998_hand.jpg", + "images/2025-01-13-17-57-45_7001_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7022_hand.jpg", + "images/2025-01-13-17-57-45_7025_hand.jpg", + "images/2025-01-13-17-57-45_7028_hand.jpg", + "images/2025-01-13-17-57-45_7031_hand.jpg", + "images/2025-01-13-17-57-45_7034_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7056_hand.jpg", + "images/2025-01-13-17-57-45_7059_hand.jpg", + "images/2025-01-13-17-57-45_7062_hand.jpg", + "images/2025-01-13-17-57-45_7065_hand.jpg", + "images/2025-01-13-17-57-45_7068_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7281_hand.jpg", + "images/2025-01-13-17-57-45_7284_hand.jpg", + "images/2025-01-13-17-57-45_7287_hand.jpg", + "images/2025-01-13-17-57-45_7290_hand.jpg", + "images/2025-01-13-17-57-45_7293_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7296_hand.jpg", + "images/2025-01-13-17-57-45_7299_hand.jpg", + "images/2025-01-13-17-57-45_7302_hand.jpg", + "images/2025-01-13-17-57-45_7305_hand.jpg", + "images/2025-01-13-17-57-45_7308_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7312_hand.jpg", + "images/2025-01-13-17-57-45_7315_hand.jpg", + "images/2025-01-13-17-57-45_7318_hand.jpg", + "images/2025-01-13-17-57-45_7321_hand.jpg", + "images/2025-01-13-17-57-45_7324_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7400_hand.jpg", + "images/2025-01-13-17-57-45_7403_hand.jpg", + "images/2025-01-13-17-57-45_7406_hand.jpg", + "images/2025-01-13-17-57-45_7409_hand.jpg", + "images/2025-01-13-17-57-45_7412_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7430_hand.jpg", + "images/2025-01-13-17-57-45_7433_hand.jpg", + "images/2025-01-13-17-57-45_7436_hand.jpg", + "images/2025-01-13-17-57-45_7439_hand.jpg", + "images/2025-01-13-17-57-45_7442_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7459_hand.jpg", + "images/2025-01-13-17-57-45_7462_hand.jpg", + "images/2025-01-13-17-57-45_7465_hand.jpg", + "images/2025-01-13-17-57-45_7468_hand.jpg", + "images/2025-01-13-17-57-45_7471_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7564_hand.jpg", + "images/2025-01-13-17-57-45_7567_hand.jpg", + "images/2025-01-13-17-57-45_7570_hand.jpg", + "images/2025-01-13-17-57-45_7573_hand.jpg", + "images/2025-01-13-17-57-45_7576_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7589_hand.jpg", + "images/2025-01-13-17-57-45_7592_hand.jpg", + "images/2025-01-13-17-57-45_7595_hand.jpg", + "images/2025-01-13-17-57-45_7598_hand.jpg", + "images/2025-01-13-17-57-45_7601_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7614_hand.jpg", + "images/2025-01-13-17-57-45_7617_hand.jpg", + "images/2025-01-13-17-57-45_7620_hand.jpg", + "images/2025-01-13-17-57-45_7623_hand.jpg", + "images/2025-01-13-17-57-45_7626_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7750_hand.jpg", + "images/2025-01-13-17-57-45_7753_hand.jpg", + "images/2025-01-13-17-57-45_7756_hand.jpg", + "images/2025-01-13-17-57-45_7759_hand.jpg", + "images/2025-01-13-17-57-45_7762_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7798_hand.jpg", + "images/2025-01-13-17-57-45_7801_hand.jpg", + "images/2025-01-13-17-57-45_7804_hand.jpg", + "images/2025-01-13-17-57-45_7807_hand.jpg", + "images/2025-01-13-17-57-45_7810_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7845_hand.jpg", + "images/2025-01-13-17-57-45_7848_hand.jpg", + "images/2025-01-13-17-57-45_7851_hand.jpg", + "images/2025-01-13-17-57-45_7854_hand.jpg", + "images/2025-01-13-17-57-45_7857_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7999_hand.jpg", + "images/2025-01-13-17-57-45_8002_hand.jpg", + "images/2025-01-13-17-57-45_8005_hand.jpg", + "images/2025-01-13-17-57-45_8008_hand.jpg", + "images/2025-01-13-17-57-45_8011_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_8034_hand.jpg", + "images/2025-01-13-17-57-45_8037_hand.jpg", + "images/2025-01-13-17-57-45_8040_hand.jpg", + "images/2025-01-13-17-57-45_8043_hand.jpg", + "images/2025-01-13-17-57-45_8046_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_8068_hand.jpg", + "images/2025-01-13-17-57-45_8071_hand.jpg", + "images/2025-01-13-17-57-45_8074_hand.jpg", + "images/2025-01-13-17-57-45_8077_hand.jpg", + "images/2025-01-13-17-57-45_8080_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_8325_hand.jpg", + "images/2025-01-13-17-57-45_8328_hand.jpg", + "images/2025-01-13-17-57-45_8331_hand.jpg", + "images/2025-01-13-17-57-45_8334_hand.jpg", + "images/2025-01-13-17-57-45_8337_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_8428_hand.jpg", + "images/2025-01-13-17-57-45_8431_hand.jpg", + "images/2025-01-13-17-57-45_8434_hand.jpg", + "images/2025-01-13-17-57-45_8437_hand.jpg", + "images/2025-01-13-17-57-45_8440_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_8530_hand.jpg", + "images/2025-01-13-17-57-45_8533_hand.jpg", + "images/2025-01-13-17-57-45_8536_hand.jpg", + "images/2025-01-13-17-57-45_8539_hand.jpg", + "images/2025-01-13-17-57-45_8542_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_8780_hand.jpg", + "images/2025-01-13-17-57-45_8783_hand.jpg", + "images/2025-01-13-17-57-45_8786_hand.jpg", + "images/2025-01-13-17-57-45_8789_hand.jpg", + "images/2025-01-13-17-57-45_8792_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_8811_hand.jpg", + "images/2025-01-13-17-57-45_8814_hand.jpg", + "images/2025-01-13-17-57-45_8817_hand.jpg", + "images/2025-01-13-17-57-45_8820_hand.jpg", + "images/2025-01-13-17-57-45_8823_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_8842_hand.jpg", + "images/2025-01-13-17-57-45_8845_hand.jpg", + "images/2025-01-13-17-57-45_8848_hand.jpg", + "images/2025-01-13-17-57-45_8851_hand.jpg", + "images/2025-01-13-17-57-45_8854_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_9001_hand.jpg", + "images/2025-01-13-17-57-45_9004_hand.jpg", + "images/2025-01-13-17-57-45_9007_hand.jpg", + "images/2025-01-13-17-57-45_9010_hand.jpg", + "images/2025-01-13-17-57-45_9013_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_9056_hand.jpg", + "images/2025-01-13-17-57-45_9059_hand.jpg", + "images/2025-01-13-17-57-45_9062_hand.jpg", + "images/2025-01-13-17-57-45_9065_hand.jpg", + "images/2025-01-13-17-57-45_9068_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_9110_hand.jpg", + "images/2025-01-13-17-57-45_9113_hand.jpg", + "images/2025-01-13-17-57-45_9116_hand.jpg", + "images/2025-01-13-17-57-45_9119_hand.jpg", + "images/2025-01-13-17-57-45_9122_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_9276_hand.jpg", + "images/2025-01-13-17-57-45_9279_hand.jpg", + "images/2025-01-13-17-57-45_9282_hand.jpg", + "images/2025-01-13-17-57-45_9285_hand.jpg", + "images/2025-01-13-17-57-45_9288_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_9311_hand.jpg", + "images/2025-01-13-17-57-45_9314_hand.jpg", + "images/2025-01-13-17-57-45_9317_hand.jpg", + "images/2025-01-13-17-57-45_9320_hand.jpg", + "images/2025-01-13-17-57-45_9323_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_9346_hand.jpg", + "images/2025-01-13-17-57-45_9349_hand.jpg", + "images/2025-01-13-17-57-45_9352_hand.jpg", + "images/2025-01-13-17-57-45_9355_hand.jpg", + "images/2025-01-13-17-57-45_9358_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10135_hand.jpg", + "images/2025-01-13-17-57-45_10138_hand.jpg", + "images/2025-01-13-17-57-45_10141_hand.jpg", + "images/2025-01-13-17-57-45_10144_hand.jpg", + "images/2025-01-13-17-57-45_10147_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10221_hand.jpg", + "images/2025-01-13-17-57-45_10224_hand.jpg", + "images/2025-01-13-17-57-45_10227_hand.jpg", + "images/2025-01-13-17-57-45_10230_hand.jpg", + "images/2025-01-13-17-57-45_10233_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10307_hand.jpg", + "images/2025-01-13-17-57-45_10310_hand.jpg", + "images/2025-01-13-17-57-45_10313_hand.jpg", + "images/2025-01-13-17-57-45_10316_hand.jpg", + "images/2025-01-13-17-57-45_10319_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10529_hand.jpg", + "images/2025-01-13-17-57-45_10532_hand.jpg", + "images/2025-01-13-17-57-45_10535_hand.jpg", + "images/2025-01-13-17-57-45_10538_hand.jpg", + "images/2025-01-13-17-57-45_10541_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10562_hand.jpg", + "images/2025-01-13-17-57-45_10565_hand.jpg", + "images/2025-01-13-17-57-45_10568_hand.jpg", + "images/2025-01-13-17-57-45_10571_hand.jpg", + "images/2025-01-13-17-57-45_10574_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10594_hand.jpg", + "images/2025-01-13-17-57-45_10597_hand.jpg", + "images/2025-01-13-17-57-45_10600_hand.jpg", + "images/2025-01-13-17-57-45_10603_hand.jpg", + "images/2025-01-13-17-57-45_10606_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10690_hand.jpg", + "images/2025-01-13-17-57-45_10693_hand.jpg", + "images/2025-01-13-17-57-45_10696_hand.jpg", + "images/2025-01-13-17-57-45_10699_hand.jpg", + "images/2025-01-13-17-57-45_10702_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10710_hand.jpg", + "images/2025-01-13-17-57-45_10713_hand.jpg", + "images/2025-01-13-17-57-45_10716_hand.jpg", + "images/2025-01-13-17-57-45_10719_hand.jpg", + "images/2025-01-13-17-57-45_10722_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10729_hand.jpg", + "images/2025-01-13-17-57-45_10732_hand.jpg", + "images/2025-01-13-17-57-45_10735_hand.jpg", + "images/2025-01-13-17-57-45_10738_hand.jpg", + "images/2025-01-13-17-57-45_10741_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10806_hand.jpg", + "images/2025-01-13-17-57-45_10809_hand.jpg", + "images/2025-01-13-17-57-45_10812_hand.jpg", + "images/2025-01-13-17-57-45_10815_hand.jpg", + "images/2025-01-13-17-57-45_10818_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10829_hand.jpg", + "images/2025-01-13-17-57-45_10832_hand.jpg", + "images/2025-01-13-17-57-45_10835_hand.jpg", + "images/2025-01-13-17-57-45_10838_hand.jpg", + "images/2025-01-13-17-57-45_10841_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10852_hand.jpg", + "images/2025-01-13-17-57-45_10855_hand.jpg", + "images/2025-01-13-17-57-45_10858_hand.jpg", + "images/2025-01-13-17-57-45_10861_hand.jpg", + "images/2025-01-13-17-57-45_10864_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_11066_hand.jpg", + "images/2025-01-13-17-57-45_11069_hand.jpg", + "images/2025-01-13-17-57-45_11072_hand.jpg", + "images/2025-01-13-17-57-45_11075_hand.jpg", + "images/2025-01-13-17-57-45_11078_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_11158_hand.jpg", + "images/2025-01-13-17-57-45_11161_hand.jpg", + "images/2025-01-13-17-57-45_11164_hand.jpg", + "images/2025-01-13-17-57-45_11167_hand.jpg", + "images/2025-01-13-17-57-45_11170_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_11251_hand.jpg", + "images/2025-01-13-17-57-45_11254_hand.jpg", + "images/2025-01-13-17-57-45_11257_hand.jpg", + "images/2025-01-13-17-57-45_11260_hand.jpg", + "images/2025-01-13-17-57-45_11263_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_11489_hand.jpg", + "images/2025-01-13-17-57-45_11492_hand.jpg", + "images/2025-01-13-17-57-45_11495_hand.jpg", + "images/2025-01-13-17-57-45_11498_hand.jpg", + "images/2025-01-13-17-57-45_11501_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_11524_hand.jpg", + "images/2025-01-13-17-57-45_11527_hand.jpg", + "images/2025-01-13-17-57-45_11530_hand.jpg", + "images/2025-01-13-17-57-45_11533_hand.jpg", + "images/2025-01-13-17-57-45_11536_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_11558_hand.jpg", + "images/2025-01-13-17-57-45_11561_hand.jpg", + "images/2025-01-13-17-57-45_11564_hand.jpg", + "images/2025-01-13-17-57-45_11567_hand.jpg", + "images/2025-01-13-17-57-45_11570_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_11798_hand.jpg", + "images/2025-01-13-17-57-45_11801_hand.jpg", + "images/2025-01-13-17-57-45_11804_hand.jpg", + "images/2025-01-13-17-57-45_11807_hand.jpg", + "images/2025-01-13-17-57-45_11810_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_11891_hand.jpg", + "images/2025-01-13-17-57-45_11894_hand.jpg", + "images/2025-01-13-17-57-45_11897_hand.jpg", + "images/2025-01-13-17-57-45_11900_hand.jpg", + "images/2025-01-13-17-57-45_11903_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_11984_hand.jpg", + "images/2025-01-13-17-57-45_11987_hand.jpg", + "images/2025-01-13-17-57-45_11990_hand.jpg", + "images/2025-01-13-17-57-45_11993_hand.jpg", + "images/2025-01-13-17-57-45_11996_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_12226_hand.jpg", + "images/2025-01-13-17-57-45_12229_hand.jpg", + "images/2025-01-13-17-57-45_12232_hand.jpg", + "images/2025-01-13-17-57-45_12235_hand.jpg", + "images/2025-01-13-17-57-45_12238_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_12262_hand.jpg", + "images/2025-01-13-17-57-45_12265_hand.jpg", + "images/2025-01-13-17-57-45_12268_hand.jpg", + "images/2025-01-13-17-57-45_12271_hand.jpg", + "images/2025-01-13-17-57-45_12274_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_12298_hand.jpg", + "images/2025-01-13-17-57-45_12301_hand.jpg", + "images/2025-01-13-17-57-45_12304_hand.jpg", + "images/2025-01-13-17-57-45_12307_hand.jpg", + "images/2025-01-13-17-57-45_12310_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_12622_hand.jpg", + "images/2025-01-13-17-57-45_12625_hand.jpg", + "images/2025-01-13-17-57-45_12628_hand.jpg", + "images/2025-01-13-17-57-45_12631_hand.jpg", + "images/2025-01-13-17-57-45_12634_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_12642_hand.jpg", + "images/2025-01-13-17-57-45_12645_hand.jpg", + "images/2025-01-13-17-57-45_12648_hand.jpg", + "images/2025-01-13-17-57-45_12651_hand.jpg", + "images/2025-01-13-17-57-45_12654_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_12663_hand.jpg", + "images/2025-01-13-17-57-45_12666_hand.jpg", + "images/2025-01-13-17-57-45_12669_hand.jpg", + "images/2025-01-13-17-57-45_12672_hand.jpg", + "images/2025-01-13-17-57-45_12675_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_12756_hand.jpg", + "images/2025-01-13-17-57-45_12759_hand.jpg", + "images/2025-01-13-17-57-45_12762_hand.jpg", + "images/2025-01-13-17-57-45_12765_hand.jpg", + "images/2025-01-13-17-57-45_12768_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_12784_hand.jpg", + "images/2025-01-13-17-57-45_12787_hand.jpg", + "images/2025-01-13-17-57-45_12790_hand.jpg", + "images/2025-01-13-17-57-45_12793_hand.jpg", + "images/2025-01-13-17-57-45_12796_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_12812_hand.jpg", + "images/2025-01-13-17-57-45_12815_hand.jpg", + "images/2025-01-13-17-57-45_12818_hand.jpg", + "images/2025-01-13-17-57-45_12821_hand.jpg", + "images/2025-01-13-17-57-45_12824_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_12931_hand.jpg", + "images/2025-01-13-17-57-45_12934_hand.jpg", + "images/2025-01-13-17-57-45_12937_hand.jpg", + "images/2025-01-13-17-57-45_12940_hand.jpg", + "images/2025-01-13-17-57-45_12943_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_12966_hand.jpg", + "images/2025-01-13-17-57-45_12969_hand.jpg", + "images/2025-01-13-17-57-45_12972_hand.jpg", + "images/2025-01-13-17-57-45_12975_hand.jpg", + "images/2025-01-13-17-57-45_12978_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-13-17-57-45_13002_hand.jpg", + "images/2025-01-13-17-57-45_13005_hand.jpg", + "images/2025-01-13-17-57-45_13008_hand.jpg", + "images/2025-01-13-17-57-45_13011_hand.jpg", + "images/2025-01-13-17-57-45_13014_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Release\nB. Grasp\nC. Align\nD. Twist\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Grasp" + } + ], + "images": [ + "images/2025-01-13-17-57-45_173_hand.jpg", + "images/2025-01-13-17-57-45_176_hand.jpg", + "images/2025-01-13-17-57-45_179_hand.jpg", + "images/2025-01-13-17-57-45_182_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Approach\nB. Grasp\nC. Lift\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Grasp" + } + ], + "images": [ + "images/2025-01-13-17-57-45_327_hand.jpg", + "images/2025-01-13-17-57-45_330_hand.jpg", + "images/2025-01-13-17-57-45_333_hand.jpg", + "images/2025-01-13-17-57-45_336_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Lift\nB. Release\nC. Push\nD. Nudge\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Lift" + } + ], + "images": [ + "images/2025-01-13-17-57-45_386_hand.jpg", + "images/2025-01-13-17-57-45_389_hand.jpg", + "images/2025-01-13-17-57-45_392_hand.jpg", + "images/2025-01-13-17-57-45_395_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Approach\nB. Pull\nC. Release\nD. Lift\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Approach" + } + ], + "images": [ + "images/2025-01-13-17-57-45_460_hand.jpg", + "images/2025-01-13-17-57-45_463_hand.jpg", + "images/2025-01-13-17-57-45_466_hand.jpg", + "images/2025-01-13-17-57-45_469_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Nudge\nB. Lift\nC. Grasp\nD. Push\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Grasp" + } + ], + "images": [ + "images/2025-01-13-17-57-45_602_hand.jpg", + "images/2025-01-13-17-57-45_605_hand.jpg", + "images/2025-01-13-17-57-45_608_hand.jpg", + "images/2025-01-13-17-57-45_611_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Push\nB. Lift\nC. Release\nD. Pull\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Lift" + } + ], + "images": [ + "images/2025-01-13-17-57-45_690_hand.jpg", + "images/2025-01-13-17-57-45_693_hand.jpg", + "images/2025-01-13-17-57-45_696_hand.jpg", + "images/2025-01-13-17-57-45_699_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Grasp\nB. Nudge\nC. Approach\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Approach" + } + ], + "images": [ + "images/2025-01-13-17-57-45_846_hand.jpg", + "images/2025-01-13-17-57-45_849_hand.jpg", + "images/2025-01-13-17-57-45_852_hand.jpg", + "images/2025-01-13-17-57-45_855_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Twist\nB. Align\nC. Release\nD. Grasp\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Align" + } + ], + "images": [ + "images/2025-01-13-17-57-45_1309_hand.jpg", + "images/2025-01-13-17-57-45_1312_hand.jpg", + "images/2025-01-13-17-57-45_1315_hand.jpg", + "images/2025-01-13-17-57-45_1318_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Lift\nB. Nudge\nC. Approach\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Approach" + } + ], + "images": [ + "images/2025-01-13-17-57-45_1678_hand.jpg", + "images/2025-01-13-17-57-45_1681_hand.jpg", + "images/2025-01-13-17-57-45_1684_hand.jpg", + "images/2025-01-13-17-57-45_1687_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Twist\nB. Align\nC. Lift\nD. Pull\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Align" + } + ], + "images": [ + "images/2025-01-13-17-57-45_1910_hand.jpg", + "images/2025-01-13-17-57-45_1913_hand.jpg", + "images/2025-01-13-17-57-45_1916_hand.jpg", + "images/2025-01-13-17-57-45_1919_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Align\nB. Pull\nC. Release\nD. Grasp\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Release" + } + ], + "images": [ + "images/2025-01-13-17-57-45_2102_hand.jpg", + "images/2025-01-13-17-57-45_2105_hand.jpg", + "images/2025-01-13-17-57-45_2108_hand.jpg", + "images/2025-01-13-17-57-45_2111_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Grasp\nB. Push\nC. Release\nD. Twist\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Grasp" + } + ], + "images": [ + "images/2025-01-13-17-57-45_2250_hand.jpg", + "images/2025-01-13-17-57-45_2253_hand.jpg", + "images/2025-01-13-17-57-45_2256_hand.jpg", + "images/2025-01-13-17-57-45_2259_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Twist\nB. Approach\nC. Align\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Approach" + } + ], + "images": [ + "images/2025-01-13-17-57-45_2478_hand.jpg", + "images/2025-01-13-17-57-45_2481_hand.jpg", + "images/2025-01-13-17-57-45_2484_hand.jpg", + "images/2025-01-13-17-57-45_2487_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Lift\nB. Grasp\nC. Align\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Align" + } + ], + "images": [ + "images/2025-01-13-17-57-45_2600_hand.jpg", + "images/2025-01-13-17-57-45_2603_hand.jpg", + "images/2025-01-13-17-57-45_2606_hand.jpg", + "images/2025-01-13-17-57-45_2609_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Nudge\nB. Push\nC. Approach\nD. Twist\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Twist" + } + ], + "images": [ + "images/2025-01-13-17-57-45_2804_hand.jpg", + "images/2025-01-13-17-57-45_2807_hand.jpg", + "images/2025-01-13-17-57-45_2810_hand.jpg", + "images/2025-01-13-17-57-45_2813_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Align\nB. Release\nC. Twist\nD. Pull\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Release" + } + ], + "images": [ + "images/2025-01-13-17-57-45_2990_hand.jpg", + "images/2025-01-13-17-57-45_2993_hand.jpg", + "images/2025-01-13-17-57-45_2996_hand.jpg", + "images/2025-01-13-17-57-45_2999_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Grasp\nB. Approach\nC. Lift\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Grasp" + } + ], + "images": [ + "images/2025-01-13-17-57-45_3067_hand.jpg", + "images/2025-01-13-17-57-45_3070_hand.jpg", + "images/2025-01-13-17-57-45_3073_hand.jpg", + "images/2025-01-13-17-57-45_3076_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Push\nB. Pull\nC. Lift\nD. Approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Lift" + } + ], + "images": [ + "images/2025-01-13-17-57-45_3140_hand.jpg", + "images/2025-01-13-17-57-45_3143_hand.jpg", + "images/2025-01-13-17-57-45_3146_hand.jpg", + "images/2025-01-13-17-57-45_3149_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Grasp\nB. Align\nC. Twist\nD. Approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Approach" + } + ], + "images": [ + "images/2025-01-13-17-57-45_3248_hand.jpg", + "images/2025-01-13-17-57-45_3251_hand.jpg", + "images/2025-01-13-17-57-45_3254_hand.jpg", + "images/2025-01-13-17-57-45_3257_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Grasp\nB. Lift\nC. Approach\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Approach" + } + ], + "images": [ + "images/2025-01-13-17-57-45_3428_hand.jpg", + "images/2025-01-13-17-57-45_3431_hand.jpg", + "images/2025-01-13-17-57-45_3434_hand.jpg", + "images/2025-01-13-17-57-45_3437_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Lift\nB. Nudge\nC. Release\nD. Pull\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Release" + } + ], + "images": [ + "images/2025-01-13-17-57-45_3528_hand.jpg", + "images/2025-01-13-17-57-45_3531_hand.jpg", + "images/2025-01-13-17-57-45_3534_hand.jpg", + "images/2025-01-13-17-57-45_3537_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Approach\nB. Grasp\nC. Lift\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Grasp" + } + ], + "images": [ + "images/2025-01-13-17-57-45_3611_hand.jpg", + "images/2025-01-13-17-57-45_3614_hand.jpg", + "images/2025-01-13-17-57-45_3617_hand.jpg", + "images/2025-01-13-17-57-45_3620_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Release\nB. Pull\nC. Approach\nD. Lift\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Lift" + } + ], + "images": [ + "images/2025-01-13-17-57-45_3689_hand.jpg", + "images/2025-01-13-17-57-45_3692_hand.jpg", + "images/2025-01-13-17-57-45_3695_hand.jpg", + "images/2025-01-13-17-57-45_3698_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Align\nB. Grasp\nC. Approach\nD. Twist\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Approach" + } + ], + "images": [ + "images/2025-01-13-17-57-45_3808_hand.jpg", + "images/2025-01-13-17-57-45_3811_hand.jpg", + "images/2025-01-13-17-57-45_3814_hand.jpg", + "images/2025-01-13-17-57-45_3817_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Pull\nB. Grasp\nC. Push\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Align" + } + ], + "images": [ + "images/2025-01-13-17-57-45_3990_hand.jpg", + "images/2025-01-13-17-57-45_3993_hand.jpg", + "images/2025-01-13-17-57-45_3996_hand.jpg", + "images/2025-01-13-17-57-45_3999_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Nudge\nB. Grasp\nC. Align\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Release" + } + ], + "images": [ + "images/2025-01-13-17-57-45_4071_hand.jpg", + "images/2025-01-13-17-57-45_4074_hand.jpg", + "images/2025-01-13-17-57-45_4077_hand.jpg", + "images/2025-01-13-17-57-45_4080_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Align\nB. Grasp\nC. Nudge\nD. Twist\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Grasp" + } + ], + "images": [ + "images/2025-01-13-17-57-45_4175_hand.jpg", + "images/2025-01-13-17-57-45_4178_hand.jpg", + "images/2025-01-13-17-57-45_4181_hand.jpg", + "images/2025-01-13-17-57-45_4184_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Pull\nB. Lift\nC. Grasp\nD. Approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Lift" + } + ], + "images": [ + "images/2025-01-13-17-57-45_4276_hand.jpg", + "images/2025-01-13-17-57-45_4279_hand.jpg", + "images/2025-01-13-17-57-45_4282_hand.jpg", + "images/2025-01-13-17-57-45_4285_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Align\nB. Push\nC. Approach\nD. Pull\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Approach" + } + ], + "images": [ + "images/2025-01-13-17-57-45_4356_hand.jpg", + "images/2025-01-13-17-57-45_4359_hand.jpg", + "images/2025-01-13-17-57-45_4362_hand.jpg", + "images/2025-01-13-17-57-45_4365_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Lift\nB. Twist\nC. Push\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Align" + } + ], + "images": [ + "images/2025-01-13-17-57-45_4470_hand.jpg", + "images/2025-01-13-17-57-45_4473_hand.jpg", + "images/2025-01-13-17-57-45_4476_hand.jpg", + "images/2025-01-13-17-57-45_4479_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Release\nB. Approach\nC. Grasp\nD. Push\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Push" + } + ], + "images": [ + "images/2025-01-13-17-57-45_4558_hand.jpg", + "images/2025-01-13-17-57-45_4561_hand.jpg", + "images/2025-01-13-17-57-45_4564_hand.jpg", + "images/2025-01-13-17-57-45_4567_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Grasp\nB. Pull\nC. Twist\nD. Nudge\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Grasp" + } + ], + "images": [ + "images/2025-01-13-17-57-45_4678_hand.jpg", + "images/2025-01-13-17-57-45_4681_hand.jpg", + "images/2025-01-13-17-57-45_4684_hand.jpg", + "images/2025-01-13-17-57-45_4687_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Nudge\nB. Push\nC. Lift\nD. Pull\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Lift" + } + ], + "images": [ + "images/2025-01-13-17-57-45_4760_hand.jpg", + "images/2025-01-13-17-57-45_4763_hand.jpg", + "images/2025-01-13-17-57-45_4766_hand.jpg", + "images/2025-01-13-17-57-45_4769_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Push\nB. Align\nC. Approach\nD. Pull\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Approach" + } + ], + "images": [ + "images/2025-01-13-17-57-45_4846_hand.jpg", + "images/2025-01-13-17-57-45_4849_hand.jpg", + "images/2025-01-13-17-57-45_4852_hand.jpg", + "images/2025-01-13-17-57-45_4855_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Release\nB. Lift\nC. Approach\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Align" + } + ], + "images": [ + "images/2025-01-13-17-57-45_5021_hand.jpg", + "images/2025-01-13-17-57-45_5024_hand.jpg", + "images/2025-01-13-17-57-45_5027_hand.jpg", + "images/2025-01-13-17-57-45_5030_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Release\nB. Pull\nC. Grasp\nD. Twist\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Release" + } + ], + "images": [ + "images/2025-01-13-17-57-45_5128_hand.jpg", + "images/2025-01-13-17-57-45_5131_hand.jpg", + "images/2025-01-13-17-57-45_5134_hand.jpg", + "images/2025-01-13-17-57-45_5137_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Lift\nB. Approach\nC. Twist\nD. Grasp\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Grasp" + } + ], + "images": [ + "images/2025-01-13-17-57-45_5204_hand.jpg", + "images/2025-01-13-17-57-45_5207_hand.jpg", + "images/2025-01-13-17-57-45_5210_hand.jpg", + "images/2025-01-13-17-57-45_5213_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Align\nB. Push\nC. Twist\nD. Approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Approach" + } + ], + "images": [ + "images/2025-01-13-17-57-45_5370_hand.jpg", + "images/2025-01-13-17-57-45_5373_hand.jpg", + "images/2025-01-13-17-57-45_5376_hand.jpg", + "images/2025-01-13-17-57-45_5379_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Nudge\nB. Twist\nC. Align\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Align" + } + ], + "images": [ + "images/2025-01-13-17-57-45_5516_hand.jpg", + "images/2025-01-13-17-57-45_5519_hand.jpg", + "images/2025-01-13-17-57-45_5522_hand.jpg", + "images/2025-01-13-17-57-45_5525_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Twist\nB. Nudge\nC. Release\nD. Push\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Push" + } + ], + "images": [ + "images/2025-01-13-17-57-45_5716_hand.jpg", + "images/2025-01-13-17-57-45_5719_hand.jpg", + "images/2025-01-13-17-57-45_5722_hand.jpg", + "images/2025-01-13-17-57-45_5725_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Grasp\nB. Nudge\nC. Lift\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Grasp" + } + ], + "images": [ + "images/2025-01-13-17-57-45_5940_hand.jpg", + "images/2025-01-13-17-57-45_5943_hand.jpg", + "images/2025-01-13-17-57-45_5946_hand.jpg", + "images/2025-01-13-17-57-45_5949_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Pull\nB. Nudge\nC. Twist\nD. Lift\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Lift" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6030_hand.jpg", + "images/2025-01-13-17-57-45_6033_hand.jpg", + "images/2025-01-13-17-57-45_6036_hand.jpg", + "images/2025-01-13-17-57-45_6039_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Approach\nB. Twist\nC. Lift\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Approach" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6158_hand.jpg", + "images/2025-01-13-17-57-45_6161_hand.jpg", + "images/2025-01-13-17-57-45_6164_hand.jpg", + "images/2025-01-13-17-57-45_6167_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Pull\nB. Align\nC. Release\nD. Push\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Align" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6295_hand.jpg", + "images/2025-01-13-17-57-45_6298_hand.jpg", + "images/2025-01-13-17-57-45_6301_hand.jpg", + "images/2025-01-13-17-57-45_6304_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Push\nB. Approach\nC. Release\nD. Grasp\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Release" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6331_hand.jpg", + "images/2025-01-13-17-57-45_6334_hand.jpg", + "images/2025-01-13-17-57-45_6337_hand.jpg", + "images/2025-01-13-17-57-45_6340_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Lift\nB. Twist\nC. Push\nD. Grasp\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Grasp" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6428_hand.jpg", + "images/2025-01-13-17-57-45_6431_hand.jpg", + "images/2025-01-13-17-57-45_6434_hand.jpg", + "images/2025-01-13-17-57-45_6437_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Align\nB. Twist\nC. Lift\nD. Pull\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Lift" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6527_hand.jpg", + "images/2025-01-13-17-57-45_6530_hand.jpg", + "images/2025-01-13-17-57-45_6533_hand.jpg", + "images/2025-01-13-17-57-45_6536_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Pull\nB. Lift\nC. Grasp\nD. Approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Approach" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6600_hand.jpg", + "images/2025-01-13-17-57-45_6603_hand.jpg", + "images/2025-01-13-17-57-45_6606_hand.jpg", + "images/2025-01-13-17-57-45_6609_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Grasp\nB. Align\nC. Twist\nD. Push\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Align" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6710_hand.jpg", + "images/2025-01-13-17-57-45_6713_hand.jpg", + "images/2025-01-13-17-57-45_6716_hand.jpg", + "images/2025-01-13-17-57-45_6719_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Push\nB. Release\nC. Align\nD. Grasp\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Release" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6770_hand.jpg", + "images/2025-01-13-17-57-45_6773_hand.jpg", + "images/2025-01-13-17-57-45_6776_hand.jpg", + "images/2025-01-13-17-57-45_6779_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Release\nB. Twist\nC. Align\nD. Grasp\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Grasp" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6842_hand.jpg", + "images/2025-01-13-17-57-45_6845_hand.jpg", + "images/2025-01-13-17-57-45_6848_hand.jpg", + "images/2025-01-13-17-57-45_6851_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Twist\nB. Approach\nC. Lift\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Lift" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6916_hand.jpg", + "images/2025-01-13-17-57-45_6919_hand.jpg", + "images/2025-01-13-17-57-45_6922_hand.jpg", + "images/2025-01-13-17-57-45_6925_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Push\nB. Twist\nC. Approach\nD. Lift\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Approach" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6992_hand.jpg", + "images/2025-01-13-17-57-45_6995_hand.jpg", + "images/2025-01-13-17-57-45_6998_hand.jpg", + "images/2025-01-13-17-57-45_7001_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Nudge\nB. Pull\nC. Lift\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Align" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7076_hand.jpg", + "images/2025-01-13-17-57-45_7079_hand.jpg", + "images/2025-01-13-17-57-45_7082_hand.jpg", + "images/2025-01-13-17-57-45_7085_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Approach\nB. Release\nC. Nudge\nD. Pull\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Release" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7106_hand.jpg", + "images/2025-01-13-17-57-45_7109_hand.jpg", + "images/2025-01-13-17-57-45_7112_hand.jpg", + "images/2025-01-13-17-57-45_7115_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Push\nB. Pull\nC. Release\nD. Grasp\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Grasp" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7184_hand.jpg", + "images/2025-01-13-17-57-45_7187_hand.jpg", + "images/2025-01-13-17-57-45_7190_hand.jpg", + "images/2025-01-13-17-57-45_7193_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Approach\nB. Release\nC. Grasp\nD. Pull\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Grasp" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7286_hand.jpg", + "images/2025-01-13-17-57-45_7289_hand.jpg", + "images/2025-01-13-17-57-45_7292_hand.jpg", + "images/2025-01-13-17-57-45_7295_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Lift\nB. Pull\nC. Grasp\nD. Twist\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Lift" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7332_hand.jpg", + "images/2025-01-13-17-57-45_7335_hand.jpg", + "images/2025-01-13-17-57-45_7338_hand.jpg", + "images/2025-01-13-17-57-45_7341_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Nudge\nB. Approach\nC. Align\nD. Grasp\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Approach" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7408_hand.jpg", + "images/2025-01-13-17-57-45_7411_hand.jpg", + "images/2025-01-13-17-57-45_7414_hand.jpg", + "images/2025-01-13-17-57-45_7417_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Align\nB. Push\nC. Grasp\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Align" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7486_hand.jpg", + "images/2025-01-13-17-57-45_7489_hand.jpg", + "images/2025-01-13-17-57-45_7492_hand.jpg", + "images/2025-01-13-17-57-45_7495_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Approach\nB. Twist\nC. Release\nD. Lift\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Release" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7507_hand.jpg", + "images/2025-01-13-17-57-45_7510_hand.jpg", + "images/2025-01-13-17-57-45_7513_hand.jpg", + "images/2025-01-13-17-57-45_7516_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Lift\nB. Twist\nC. Pull\nD. Grasp\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Grasp" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7580_hand.jpg", + "images/2025-01-13-17-57-45_7583_hand.jpg", + "images/2025-01-13-17-57-45_7586_hand.jpg", + "images/2025-01-13-17-57-45_7589_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Approach\nB. Lift\nC. Release\nD. Grasp\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Lift" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7653_hand.jpg", + "images/2025-01-13-17-57-45_7656_hand.jpg", + "images/2025-01-13-17-57-45_7659_hand.jpg", + "images/2025-01-13-17-57-45_7662_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Release\nB. Pull\nC. Lift\nD. Approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Approach" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7724_hand.jpg", + "images/2025-01-13-17-57-45_7727_hand.jpg", + "images/2025-01-13-17-57-45_7730_hand.jpg", + "images/2025-01-13-17-57-45_7733_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Align\nB. Twist\nC. Pull\nD. Grasp\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Align" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7849_hand.jpg", + "images/2025-01-13-17-57-45_7852_hand.jpg", + "images/2025-01-13-17-57-45_7855_hand.jpg", + "images/2025-01-13-17-57-45_7858_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Approach\nB. Nudge\nC. Lift\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Release" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7923_hand.jpg", + "images/2025-01-13-17-57-45_7926_hand.jpg", + "images/2025-01-13-17-57-45_7929_hand.jpg", + "images/2025-01-13-17-57-45_7932_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Align\nB. Grasp\nC. Twist\nD. Pull\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Grasp" + } + ], + "images": [ + "images/2025-01-13-17-57-45_8022_hand.jpg", + "images/2025-01-13-17-57-45_8025_hand.jpg", + "images/2025-01-13-17-57-45_8028_hand.jpg", + "images/2025-01-13-17-57-45_8031_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Lift\nB. Pull\nC. Nudge\nD. Push\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Lift" + } + ], + "images": [ + "images/2025-01-13-17-57-45_8122_hand.jpg", + "images/2025-01-13-17-57-45_8125_hand.jpg", + "images/2025-01-13-17-57-45_8128_hand.jpg", + "images/2025-01-13-17-57-45_8131_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Release\nB. Align\nC. Approach\nD. Nudge\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Approach" + } + ], + "images": [ + "images/2025-01-13-17-57-45_8230_hand.jpg", + "images/2025-01-13-17-57-45_8233_hand.jpg", + "images/2025-01-13-17-57-45_8236_hand.jpg", + "images/2025-01-13-17-57-45_8239_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Twist\nB. Push\nC. Align\nD. Grasp\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Align" + } + ], + "images": [ + "images/2025-01-13-17-57-45_8515_hand.jpg", + "images/2025-01-13-17-57-45_8518_hand.jpg", + "images/2025-01-13-17-57-45_8521_hand.jpg", + "images/2025-01-13-17-57-45_8524_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Release\nB. Align\nC. Pull\nD. Approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Release" + } + ], + "images": [ + "images/2025-01-13-17-57-45_8713_hand.jpg", + "images/2025-01-13-17-57-45_8716_hand.jpg", + "images/2025-01-13-17-57-45_8719_hand.jpg", + "images/2025-01-13-17-57-45_8722_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Grasp\nB. Nudge\nC. Twist\nD. Push\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Grasp" + } + ], + "images": [ + "images/2025-01-13-17-57-45_8803_hand.jpg", + "images/2025-01-13-17-57-45_8806_hand.jpg", + "images/2025-01-13-17-57-45_8809_hand.jpg", + "images/2025-01-13-17-57-45_8812_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Lift\nB. Release\nC. Twist\nD. Push\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Lift" + } + ], + "images": [ + "images/2025-01-13-17-57-45_8892_hand.jpg", + "images/2025-01-13-17-57-45_8895_hand.jpg", + "images/2025-01-13-17-57-45_8898_hand.jpg", + "images/2025-01-13-17-57-45_8901_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Push\nB. Release\nC. Lift\nD. Approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Approach" + } + ], + "images": [ + "images/2025-01-13-17-57-45_9003_hand.jpg", + "images/2025-01-13-17-57-45_9006_hand.jpg", + "images/2025-01-13-17-57-45_9009_hand.jpg", + "images/2025-01-13-17-57-45_9012_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Twist\nB. Lift\nC. Pull\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Align" + } + ], + "images": [ + "images/2025-01-13-17-57-45_9148_hand.jpg", + "images/2025-01-13-17-57-45_9151_hand.jpg", + "images/2025-01-13-17-57-45_9154_hand.jpg", + "images/2025-01-13-17-57-45_9157_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Grasp\nB. Push\nC. Lift\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Release" + } + ], + "images": [ + "images/2025-01-13-17-57-45_9200_hand.jpg", + "images/2025-01-13-17-57-45_9203_hand.jpg", + "images/2025-01-13-17-57-45_9206_hand.jpg", + "images/2025-01-13-17-57-45_9209_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Push\nB. Align\nC. Nudge\nD. Grasp\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Grasp" + } + ], + "images": [ + "images/2025-01-13-17-57-45_9301_hand.jpg", + "images/2025-01-13-17-57-45_9304_hand.jpg", + "images/2025-01-13-17-57-45_9307_hand.jpg", + "images/2025-01-13-17-57-45_9310_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Lift\nB. Align\nC. Pull\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Lift" + } + ], + "images": [ + "images/2025-01-13-17-57-45_9400_hand.jpg", + "images/2025-01-13-17-57-45_9403_hand.jpg", + "images/2025-01-13-17-57-45_9406_hand.jpg", + "images/2025-01-13-17-57-45_9409_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Align\nB. Pull\nC. Release\nD. Approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Approach" + } + ], + "images": [ + "images/2025-01-13-17-57-45_9490_hand.jpg", + "images/2025-01-13-17-57-45_9493_hand.jpg", + "images/2025-01-13-17-57-45_9496_hand.jpg", + "images/2025-01-13-17-57-45_9499_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Align\nB. Nudge\nC. Lift\nD. Push\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Align" + } + ], + "images": [ + "images/2025-01-13-17-57-45_9772_hand.jpg", + "images/2025-01-13-17-57-45_9775_hand.jpg", + "images/2025-01-13-17-57-45_9778_hand.jpg", + "images/2025-01-13-17-57-45_9781_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Push\nB. Grasp\nC. Lift\nD. Approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Approach" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10047_hand.jpg", + "images/2025-01-13-17-57-45_10050_hand.jpg", + "images/2025-01-13-17-57-45_10053_hand.jpg", + "images/2025-01-13-17-57-45_10056_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Nudge\nB. Twist\nC. Push\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Align" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10286_hand.jpg", + "images/2025-01-13-17-57-45_10289_hand.jpg", + "images/2025-01-13-17-57-45_10292_hand.jpg", + "images/2025-01-13-17-57-45_10295_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Grasp\nB. Approach\nC. Release\nD. Twist\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Release" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10460_hand.jpg", + "images/2025-01-13-17-57-45_10463_hand.jpg", + "images/2025-01-13-17-57-45_10466_hand.jpg", + "images/2025-01-13-17-57-45_10469_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Release\nB. Nudge\nC. Lift\nD. Grasp\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Grasp" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10550_hand.jpg", + "images/2025-01-13-17-57-45_10553_hand.jpg", + "images/2025-01-13-17-57-45_10556_hand.jpg", + "images/2025-01-13-17-57-45_10559_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Approach\nB. Nudge\nC. Align\nD. Lift\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Lift" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10644_hand.jpg", + "images/2025-01-13-17-57-45_10647_hand.jpg", + "images/2025-01-13-17-57-45_10650_hand.jpg", + "images/2025-01-13-17-57-45_10653_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Pull\nB. Nudge\nC. Approach\nD. Lift\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Approach" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10701_hand.jpg", + "images/2025-01-13-17-57-45_10704_hand.jpg", + "images/2025-01-13-17-57-45_10707_hand.jpg", + "images/2025-01-13-17-57-45_10710_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Nudge\nB. Release\nC. Push\nD. Pull\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Release" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10756_hand.jpg", + "images/2025-01-13-17-57-45_10759_hand.jpg", + "images/2025-01-13-17-57-45_10762_hand.jpg", + "images/2025-01-13-17-57-45_10765_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Twist\nB. Pull\nC. Nudge\nD. Grasp\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Grasp" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10824_hand.jpg", + "images/2025-01-13-17-57-45_10827_hand.jpg", + "images/2025-01-13-17-57-45_10830_hand.jpg", + "images/2025-01-13-17-57-45_10833_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Nudge\nB. Push\nC. Lift\nD. Grasp\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Lift" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10890_hand.jpg", + "images/2025-01-13-17-57-45_10893_hand.jpg", + "images/2025-01-13-17-57-45_10896_hand.jpg", + "images/2025-01-13-17-57-45_10899_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Align\nB. Release\nC. Nudge\nD. Approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Approach" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10993_hand.jpg", + "images/2025-01-13-17-57-45_10996_hand.jpg", + "images/2025-01-13-17-57-45_10999_hand.jpg", + "images/2025-01-13-17-57-45_11002_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Grasp\nB. Nudge\nC. Align\nD. Push\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Align" + } + ], + "images": [ + "images/2025-01-13-17-57-45_11124_hand.jpg", + "images/2025-01-13-17-57-45_11127_hand.jpg", + "images/2025-01-13-17-57-45_11130_hand.jpg", + "images/2025-01-13-17-57-45_11133_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Push\nB. Release\nC. Align\nD. Twist\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Push" + } + ], + "images": [ + "images/2025-01-13-17-57-45_11290_hand.jpg", + "images/2025-01-13-17-57-45_11293_hand.jpg", + "images/2025-01-13-17-57-45_11296_hand.jpg", + "images/2025-01-13-17-57-45_11299_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Lift\nB. Nudge\nC. Grasp\nD. Push\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Grasp" + } + ], + "images": [ + "images/2025-01-13-17-57-45_11518_hand.jpg", + "images/2025-01-13-17-57-45_11521_hand.jpg", + "images/2025-01-13-17-57-45_11524_hand.jpg", + "images/2025-01-13-17-57-45_11527_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Lift\nB. Push\nC. Pull\nD. Approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Lift" + } + ], + "images": [ + "images/2025-01-13-17-57-45_11620_hand.jpg", + "images/2025-01-13-17-57-45_11623_hand.jpg", + "images/2025-01-13-17-57-45_11626_hand.jpg", + "images/2025-01-13-17-57-45_11629_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Lift\nB. Grasp\nC. Approach\nD. Twist\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Approach" + } + ], + "images": [ + "images/2025-01-13-17-57-45_11726_hand.jpg", + "images/2025-01-13-17-57-45_11729_hand.jpg", + "images/2025-01-13-17-57-45_11732_hand.jpg", + "images/2025-01-13-17-57-45_11735_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Grasp\nB. Align\nC. Approach\nD. Twist\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Align" + } + ], + "images": [ + "images/2025-01-13-17-57-45_11903_hand.jpg", + "images/2025-01-13-17-57-45_11906_hand.jpg", + "images/2025-01-13-17-57-45_11909_hand.jpg", + "images/2025-01-13-17-57-45_11912_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Push\nB. Twist\nC. Approach\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Twist" + } + ], + "images": [ + "images/2025-01-13-17-57-45_12058_hand.jpg", + "images/2025-01-13-17-57-45_12061_hand.jpg", + "images/2025-01-13-17-57-45_12064_hand.jpg", + "images/2025-01-13-17-57-45_12067_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Align\nB. Release\nC. Pull\nD. Push\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Release" + } + ], + "images": [ + "images/2025-01-13-17-57-45_12148_hand.jpg", + "images/2025-01-13-17-57-45_12151_hand.jpg", + "images/2025-01-13-17-57-45_12154_hand.jpg", + "images/2025-01-13-17-57-45_12157_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Twist\nB. Grasp\nC. Nudge\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Grasp" + } + ], + "images": [ + "images/2025-01-13-17-57-45_12250_hand.jpg", + "images/2025-01-13-17-57-45_12253_hand.jpg", + "images/2025-01-13-17-57-45_12256_hand.jpg", + "images/2025-01-13-17-57-45_12259_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Push\nB. Approach\nC. Lift\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Lift" + } + ], + "images": [ + "images/2025-01-13-17-57-45_12352_hand.jpg", + "images/2025-01-13-17-57-45_12355_hand.jpg", + "images/2025-01-13-17-57-45_12358_hand.jpg", + "images/2025-01-13-17-57-45_12361_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Release\nB. Align\nC. Approach\nD. Grasp\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Approach" + } + ], + "images": [ + "images/2025-01-13-17-57-45_12419_hand.jpg", + "images/2025-01-13-17-57-45_12422_hand.jpg", + "images/2025-01-13-17-57-45_12425_hand.jpg", + "images/2025-01-13-17-57-45_12428_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Lift\nB. Align\nC. Release\nD. Pull\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Align" + } + ], + "images": [ + "images/2025-01-13-17-57-45_12527_hand.jpg", + "images/2025-01-13-17-57-45_12530_hand.jpg", + "images/2025-01-13-17-57-45_12533_hand.jpg", + "images/2025-01-13-17-57-45_12536_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Align\nB. Grasp\nC. Pull\nD. Approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Approach" + } + ], + "images": [ + "images/2025-01-13-17-57-45_12636_hand.jpg", + "images/2025-01-13-17-57-45_12639_hand.jpg", + "images/2025-01-13-17-57-45_12642_hand.jpg", + "images/2025-01-13-17-57-45_12645_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Release\nB. Lift\nC. Push\nD. Twist\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Release" + } + ], + "images": [ + "images/2025-01-13-17-57-45_12696_hand.jpg", + "images/2025-01-13-17-57-45_12699_hand.jpg", + "images/2025-01-13-17-57-45_12702_hand.jpg", + "images/2025-01-13-17-57-45_12705_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Align\nB. Nudge\nC. Grasp\nD. Push\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Grasp" + } + ], + "images": [ + "images/2025-01-13-17-57-45_12776_hand.jpg", + "images/2025-01-13-17-57-45_12779_hand.jpg", + "images/2025-01-13-17-57-45_12782_hand.jpg", + "images/2025-01-13-17-57-45_12785_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Lift\nB. Twist\nC. Pull\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Lift" + } + ], + "images": [ + "images/2025-01-13-17-57-45_12857_hand.jpg", + "images/2025-01-13-17-57-45_12860_hand.jpg", + "images/2025-01-13-17-57-45_12863_hand.jpg", + "images/2025-01-13-17-57-45_12866_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Push\nB. Nudge\nC. Pull\nD. Approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Approach" + } + ], + "images": [ + "images/2025-01-13-17-57-45_12922_hand.jpg", + "images/2025-01-13-17-57-45_12925_hand.jpg", + "images/2025-01-13-17-57-45_12928_hand.jpg", + "images/2025-01-13-17-57-45_12931_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Pull\nB. Approach\nC. Align\nD. Nudge\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Align" + } + ], + "images": [ + "images/2025-01-13-17-57-45_13010_hand.jpg", + "images/2025-01-13-17-57-45_13013_hand.jpg", + "images/2025-01-13-17-57-45_13016_hand.jpg", + "images/2025-01-13-17-57-45_13019_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Grasp\nB. Twist\nC. Lift\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Release" + } + ], + "images": [ + "images/2025-01-13-17-57-45_13055_hand.jpg", + "images/2025-01-13-17-57-45_13058_hand.jpg", + "images/2025-01-13-17-57-45_13061_hand.jpg", + "images/2025-01-13-17-57-45_13064_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Pick D-SUB', what low-level action should happen next?\nOptions:\nA. Lift\nB. Pull\nC. Release\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Lift" + } + ], + "images": [ + "images/2025-01-13-17-57-45_344_hand.jpg", + "images/2025-01-13-17-57-45_347_hand.jpg", + "images/2025-01-13-17-57-45_350_hand.jpg", + "images/2025-01-13-17-57-45_353_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Pick D-SUB', what low-level action should happen next?\nOptions:\nA. Twist\nB. Nudge\nC. Lift\nD. Push\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Lift" + } + ], + "images": [ + "images/2025-01-13-17-57-45_629_hand.jpg", + "images/2025-01-13-17-57-45_632_hand.jpg", + "images/2025-01-13-17-57-45_635_hand.jpg", + "images/2025-01-13-17-57-45_638_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert D-SUB', what low-level action should happen next?\nOptions:\nA. Pull\nB. Align\nC. Nudge\nD. Lift\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Align" + } + ], + "images": [ + "images/2025-01-13-17-57-45_897_hand.jpg", + "images/2025-01-13-17-57-45_900_hand.jpg", + "images/2025-01-13-17-57-45_903_hand.jpg", + "images/2025-01-13-17-57-45_906_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert D-SUB', what low-level action should happen next?\nOptions:\nA. Release\nB. Twist\nC. Align\nD. Nudge\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Align" + } + ], + "images": [ + "images/2025-01-13-17-57-45_1697_hand.jpg", + "images/2025-01-13-17-57-45_1700_hand.jpg", + "images/2025-01-13-17-57-45_1703_hand.jpg", + "images/2025-01-13-17-57-45_1706_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert D-SUB', what low-level action should happen next?\nOptions:\nA. Release\nB. Twist\nC. Push\nD. Nudge\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Release" + } + ], + "images": [ + "images/2025-01-13-17-57-45_1973_hand.jpg", + "images/2025-01-13-17-57-45_1976_hand.jpg", + "images/2025-01-13-17-57-45_1979_hand.jpg", + "images/2025-01-13-17-57-45_1982_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Pick BNC', what low-level action should happen next?\nOptions:\nA. Lift\nB. Push\nC. Approach\nD. Twist\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Lift" + } + ], + "images": [ + "images/2025-01-13-17-57-45_2297_hand.jpg", + "images/2025-01-13-17-57-45_2300_hand.jpg", + "images/2025-01-13-17-57-45_2303_hand.jpg", + "images/2025-01-13-17-57-45_2306_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert BNC', what low-level action should happen next?\nOptions:\nA. Align\nB. Twist\nC. Push\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Align" + } + ], + "images": [ + "images/2025-01-13-17-57-45_2509_hand.jpg", + "images/2025-01-13-17-57-45_2512_hand.jpg", + "images/2025-01-13-17-57-45_2515_hand.jpg", + "images/2025-01-13-17-57-45_2518_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert BNC', what low-level action should happen next?\nOptions:\nA. Nudge\nB. Push\nC. Lift\nD. Twist\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Twist" + } + ], + "images": [ + "images/2025-01-13-17-57-45_2611_hand.jpg", + "images/2025-01-13-17-57-45_2614_hand.jpg", + "images/2025-01-13-17-57-45_2617_hand.jpg", + "images/2025-01-13-17-57-45_2620_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert BNC', what low-level action should happen next?\nOptions:\nA. Align\nB. Nudge\nC. Lift\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Release" + } + ], + "images": [ + "images/2025-01-13-17-57-45_2864_hand.jpg", + "images/2025-01-13-17-57-45_2867_hand.jpg", + "images/2025-01-13-17-57-45_2870_hand.jpg", + "images/2025-01-13-17-57-45_2873_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Pick Ethernet', what low-level action should happen next?\nOptions:\nA. Twist\nB. Nudge\nC. Push\nD. Lift\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Lift" + } + ], + "images": [ + "images/2025-01-13-17-57-45_3088_hand.jpg", + "images/2025-01-13-17-57-45_3091_hand.jpg", + "images/2025-01-13-17-57-45_3094_hand.jpg", + "images/2025-01-13-17-57-45_3097_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Place Ethernet', what low-level action should happen next?\nOptions:\nA. Nudge\nB. Release\nC. Lift\nD. Pull\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Release" + } + ], + "images": [ + "images/2025-01-13-17-57-45_3458_hand.jpg", + "images/2025-01-13-17-57-45_3461_hand.jpg", + "images/2025-01-13-17-57-45_3464_hand.jpg", + "images/2025-01-13-17-57-45_3467_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Pick Ethernet', what low-level action should happen next?\nOptions:\nA. Lift\nB. Nudge\nC. Release\nD. Pull\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Lift" + } + ], + "images": [ + "images/2025-01-13-17-57-45_3635_hand.jpg", + "images/2025-01-13-17-57-45_3638_hand.jpg", + "images/2025-01-13-17-57-45_3641_hand.jpg", + "images/2025-01-13-17-57-45_3644_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert Ethernet', what low-level action should happen next?\nOptions:\nA. Grasp\nB. Align\nC. Lift\nD. Nudge\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Align" + } + ], + "images": [ + "images/2025-01-13-17-57-45_3846_hand.jpg", + "images/2025-01-13-17-57-45_3849_hand.jpg", + "images/2025-01-13-17-57-45_3852_hand.jpg", + "images/2025-01-13-17-57-45_3855_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert Ethernet', what low-level action should happen next?\nOptions:\nA. Lift\nB. Approach\nC. Release\nD. Grasp\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Release" + } + ], + "images": [ + "images/2025-01-13-17-57-45_4015_hand.jpg", + "images/2025-01-13-17-57-45_4018_hand.jpg", + "images/2025-01-13-17-57-45_4021_hand.jpg", + "images/2025-01-13-17-57-45_4024_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Pick large gear', what low-level action should happen next?\nOptions:\nA. Approach\nB. Nudge\nC. Lift\nD. Pull\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Lift" + } + ], + "images": [ + "images/2025-01-13-17-57-45_4208_hand.jpg", + "images/2025-01-13-17-57-45_4211_hand.jpg", + "images/2025-01-13-17-57-45_4214_hand.jpg", + "images/2025-01-13-17-57-45_4217_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert large gear', what low-level action should happen next?\nOptions:\nA. Release\nB. Nudge\nC. Lift\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Align" + } + ], + "images": [ + "images/2025-01-13-17-57-45_4381_hand.jpg", + "images/2025-01-13-17-57-45_4384_hand.jpg", + "images/2025-01-13-17-57-45_4387_hand.jpg", + "images/2025-01-13-17-57-45_4390_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert large gear', what low-level action should happen next?\nOptions:\nA. Pull\nB. Lift\nC. Approach\nD. Push\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Push" + } + ], + "images": [ + "images/2025-01-13-17-57-45_4485_hand.jpg", + "images/2025-01-13-17-57-45_4488_hand.jpg", + "images/2025-01-13-17-57-45_4491_hand.jpg", + "images/2025-01-13-17-57-45_4494_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Pick medium gear', what low-level action should happen next?\nOptions:\nA. Twist\nB. Release\nC. Lift\nD. Pull\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Lift" + } + ], + "images": [ + "images/2025-01-13-17-57-45_4704_hand.jpg", + "images/2025-01-13-17-57-45_4707_hand.jpg", + "images/2025-01-13-17-57-45_4710_hand.jpg", + "images/2025-01-13-17-57-45_4713_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert medium gear', what low-level action should happen next?\nOptions:\nA. Release\nB. Lift\nC. Align\nD. Nudge\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Align" + } + ], + "images": [ + "images/2025-01-13-17-57-45_4872_hand.jpg", + "images/2025-01-13-17-57-45_4875_hand.jpg", + "images/2025-01-13-17-57-45_4878_hand.jpg", + "images/2025-01-13-17-57-45_4881_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert medium gear', what low-level action should happen next?\nOptions:\nA. Release\nB. Lift\nC. Twist\nD. Grasp\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Release" + } + ], + "images": [ + "images/2025-01-13-17-57-45_5055_hand.jpg", + "images/2025-01-13-17-57-45_5058_hand.jpg", + "images/2025-01-13-17-57-45_5061_hand.jpg", + "images/2025-01-13-17-57-45_5064_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Pick small gear', what low-level action should happen next?\nOptions:\nA. Push\nB. Nudge\nC. Lift\nD. Pull\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Lift" + } + ], + "images": [ + "images/2025-01-13-17-57-45_5228_hand.jpg", + "images/2025-01-13-17-57-45_5231_hand.jpg", + "images/2025-01-13-17-57-45_5234_hand.jpg", + "images/2025-01-13-17-57-45_5237_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert small gear', what low-level action should happen next?\nOptions:\nA. Lift\nB. Push\nC. Release\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Align" + } + ], + "images": [ + "images/2025-01-13-17-57-45_5401_hand.jpg", + "images/2025-01-13-17-57-45_5404_hand.jpg", + "images/2025-01-13-17-57-45_5407_hand.jpg", + "images/2025-01-13-17-57-45_5410_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert small gear', what low-level action should happen next?\nOptions:\nA. Approach\nB. Push\nC. Release\nD. Grasp\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Push" + } + ], + "images": [ + "images/2025-01-13-17-57-45_5536_hand.jpg", + "images/2025-01-13-17-57-45_5539_hand.jpg", + "images/2025-01-13-17-57-45_5542_hand.jpg", + "images/2025-01-13-17-57-45_5545_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Pick USB', what low-level action should happen next?\nOptions:\nA. Pull\nB. Push\nC. Release\nD. Lift\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Lift" + } + ], + "images": [ + "images/2025-01-13-17-57-45_5968_hand.jpg", + "images/2025-01-13-17-57-45_5971_hand.jpg", + "images/2025-01-13-17-57-45_5974_hand.jpg", + "images/2025-01-13-17-57-45_5977_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert USB', what low-level action should happen next?\nOptions:\nA. Release\nB. Push\nC. Align\nD. Grasp\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Align" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6198_hand.jpg", + "images/2025-01-13-17-57-45_6201_hand.jpg", + "images/2025-01-13-17-57-45_6204_hand.jpg", + "images/2025-01-13-17-57-45_6207_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert USB', what low-level action should happen next?\nOptions:\nA. Approach\nB. Pull\nC. Lift\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Release" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6302_hand.jpg", + "images/2025-01-13-17-57-45_6305_hand.jpg", + "images/2025-01-13-17-57-45_6308_hand.jpg", + "images/2025-01-13-17-57-45_6311_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Pick round peg 4', what low-level action should happen next?\nOptions:\nA. Nudge\nB. Lift\nC. Twist\nD. Pull\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Lift" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6457_hand.jpg", + "images/2025-01-13-17-57-45_6460_hand.jpg", + "images/2025-01-13-17-57-45_6463_hand.jpg", + "images/2025-01-13-17-57-45_6466_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert round peg 4', what low-level action should happen next?\nOptions:\nA. Twist\nB. Align\nC. Grasp\nD. Push\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Align" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6620_hand.jpg", + "images/2025-01-13-17-57-45_6623_hand.jpg", + "images/2025-01-13-17-57-45_6626_hand.jpg", + "images/2025-01-13-17-57-45_6629_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert round peg 4', what low-level action should happen next?\nOptions:\nA. Pull\nB. Release\nC. Nudge\nD. Twist\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Release" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6728_hand.jpg", + "images/2025-01-13-17-57-45_6731_hand.jpg", + "images/2025-01-13-17-57-45_6734_hand.jpg", + "images/2025-01-13-17-57-45_6737_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Pick round peg 3', what low-level action should happen next?\nOptions:\nA. Approach\nB. Push\nC. Pull\nD. Lift\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Lift" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6864_hand.jpg", + "images/2025-01-13-17-57-45_6867_hand.jpg", + "images/2025-01-13-17-57-45_6870_hand.jpg", + "images/2025-01-13-17-57-45_6873_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert round peg 3', what low-level action should happen next?\nOptions:\nA. Nudge\nB. Push\nC. Twist\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Align" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7015_hand.jpg", + "images/2025-01-13-17-57-45_7018_hand.jpg", + "images/2025-01-13-17-57-45_7021_hand.jpg", + "images/2025-01-13-17-57-45_7024_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert round peg 3', what low-level action should happen next?\nOptions:\nA. Push\nB. Lift\nC. Release\nD. Grasp\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Release" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7083_hand.jpg", + "images/2025-01-13-17-57-45_7086_hand.jpg", + "images/2025-01-13-17-57-45_7089_hand.jpg", + "images/2025-01-13-17-57-45_7092_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Pick round peg 2', what low-level action should happen next?\nOptions:\nA. Nudge\nB. Lift\nC. Release\nD. Push\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Lift" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7299_hand.jpg", + "images/2025-01-13-17-57-45_7302_hand.jpg", + "images/2025-01-13-17-57-45_7305_hand.jpg", + "images/2025-01-13-17-57-45_7308_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert round peg 2', what low-level action should happen next?\nOptions:\nA. Release\nB. Align\nC. Twist\nD. Grasp\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Align" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7430_hand.jpg", + "images/2025-01-13-17-57-45_7433_hand.jpg", + "images/2025-01-13-17-57-45_7436_hand.jpg", + "images/2025-01-13-17-57-45_7439_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert round peg 2', what low-level action should happen next?\nOptions:\nA. Release\nB. Lift\nC. Twist\nD. Push\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Release" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7490_hand.jpg", + "images/2025-01-13-17-57-45_7493_hand.jpg", + "images/2025-01-13-17-57-45_7496_hand.jpg", + "images/2025-01-13-17-57-45_7499_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Pick round peg 1', what low-level action should happen next?\nOptions:\nA. Nudge\nB. Twist\nC. Pull\nD. Lift\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Lift" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7602_hand.jpg", + "images/2025-01-13-17-57-45_7605_hand.jpg", + "images/2025-01-13-17-57-45_7608_hand.jpg", + "images/2025-01-13-17-57-45_7611_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert round peg 1', what low-level action should happen next?\nOptions:\nA. Align\nB. Release\nC. Pull\nD. Grasp\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Align" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7745_hand.jpg", + "images/2025-01-13-17-57-45_7748_hand.jpg", + "images/2025-01-13-17-57-45_7751_hand.jpg", + "images/2025-01-13-17-57-45_7754_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert round peg 1', what low-level action should happen next?\nOptions:\nA. Approach\nB. Twist\nC. Release\nD. Pull\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Release" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7872_hand.jpg", + "images/2025-01-13-17-57-45_7875_hand.jpg", + "images/2025-01-13-17-57-45_7878_hand.jpg", + "images/2025-01-13-17-57-45_7881_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Pick square peg 1', what low-level action should happen next?\nOptions:\nA. Approach\nB. Release\nC. Lift\nD. Pull\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Lift" + } + ], + "images": [ + "images/2025-01-13-17-57-45_8053_hand.jpg", + "images/2025-01-13-17-57-45_8056_hand.jpg", + "images/2025-01-13-17-57-45_8059_hand.jpg", + "images/2025-01-13-17-57-45_8062_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert square peg 1', what low-level action should happen next?\nOptions:\nA. Push\nB. Align\nC. Grasp\nD. Lift\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Align" + } + ], + "images": [ + "images/2025-01-13-17-57-45_8263_hand.jpg", + "images/2025-01-13-17-57-45_8266_hand.jpg", + "images/2025-01-13-17-57-45_8269_hand.jpg", + "images/2025-01-13-17-57-45_8272_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert square peg 1', what low-level action should happen next?\nOptions:\nA. Nudge\nB. Lift\nC. Release\nD. Grasp\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Release" + } + ], + "images": [ + "images/2025-01-13-17-57-45_8581_hand.jpg", + "images/2025-01-13-17-57-45_8584_hand.jpg", + "images/2025-01-13-17-57-45_8587_hand.jpg", + "images/2025-01-13-17-57-45_8590_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Pick square peg 2', what low-level action should happen next?\nOptions:\nA. Lift\nB. Align\nC. Pull\nD. Push\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Lift" + } + ], + "images": [ + "images/2025-01-13-17-57-45_8832_hand.jpg", + "images/2025-01-13-17-57-45_8835_hand.jpg", + "images/2025-01-13-17-57-45_8838_hand.jpg", + "images/2025-01-13-17-57-45_8841_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert square peg 2', what low-level action should happen next?\nOptions:\nA. Grasp\nB. Lift\nC. Align\nD. Twist\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Align" + } + ], + "images": [ + "images/2025-01-13-17-57-45_9038_hand.jpg", + "images/2025-01-13-17-57-45_9041_hand.jpg", + "images/2025-01-13-17-57-45_9044_hand.jpg", + "images/2025-01-13-17-57-45_9047_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert square peg 2', what low-level action should happen next?\nOptions:\nA. Release\nB. Push\nC. Twist\nD. Grasp\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Release" + } + ], + "images": [ + "images/2025-01-13-17-57-45_9162_hand.jpg", + "images/2025-01-13-17-57-45_9165_hand.jpg", + "images/2025-01-13-17-57-45_9168_hand.jpg", + "images/2025-01-13-17-57-45_9171_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Pick square peg 3', what low-level action should happen next?\nOptions:\nA. Lift\nB. Nudge\nC. Pull\nD. Push\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Lift" + } + ], + "images": [ + "images/2025-01-13-17-57-45_9332_hand.jpg", + "images/2025-01-13-17-57-45_9335_hand.jpg", + "images/2025-01-13-17-57-45_9338_hand.jpg", + "images/2025-01-13-17-57-45_9341_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert square peg 3', what low-level action should happen next?\nOptions:\nA. Lift\nB. Nudge\nC. Push\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Align" + } + ], + "images": [ + "images/2025-01-13-17-57-45_9517_hand.jpg", + "images/2025-01-13-17-57-45_9520_hand.jpg", + "images/2025-01-13-17-57-45_9523_hand.jpg", + "images/2025-01-13-17-57-45_9526_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert square peg 3', what low-level action should happen next?\nOptions:\nA. Twist\nB. Nudge\nC. Pull\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Align" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10072_hand.jpg", + "images/2025-01-13-17-57-45_10075_hand.jpg", + "images/2025-01-13-17-57-45_10078_hand.jpg", + "images/2025-01-13-17-57-45_10081_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert square peg 3', what low-level action should happen next?\nOptions:\nA. Approach\nB. Grasp\nC. Push\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Release" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10344_hand.jpg", + "images/2025-01-13-17-57-45_10347_hand.jpg", + "images/2025-01-13-17-57-45_10350_hand.jpg", + "images/2025-01-13-17-57-45_10353_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Pick waterproof', what low-level action should happen next?\nOptions:\nA. Push\nB. Nudge\nC. Lift\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Lift" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10579_hand.jpg", + "images/2025-01-13-17-57-45_10582_hand.jpg", + "images/2025-01-13-17-57-45_10585_hand.jpg", + "images/2025-01-13-17-57-45_10588_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Place waterproof', what low-level action should happen next?\nOptions:\nA. Align\nB. Grasp\nC. Release\nD. Push\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Release" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10717_hand.jpg", + "images/2025-01-13-17-57-45_10720_hand.jpg", + "images/2025-01-13-17-57-45_10723_hand.jpg", + "images/2025-01-13-17-57-45_10726_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Pick waterproof', what low-level action should happen next?\nOptions:\nA. Twist\nB. Nudge\nC. Lift\nD. Push\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Lift" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10844_hand.jpg", + "images/2025-01-13-17-57-45_10847_hand.jpg", + "images/2025-01-13-17-57-45_10850_hand.jpg", + "images/2025-01-13-17-57-45_10853_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert waterproof', what low-level action should happen next?\nOptions:\nA. Align\nB. Push\nC. Grasp\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Align" + } + ], + "images": [ + "images/2025-01-13-17-57-45_11028_hand.jpg", + "images/2025-01-13-17-57-45_11031_hand.jpg", + "images/2025-01-13-17-57-45_11034_hand.jpg", + "images/2025-01-13-17-57-45_11037_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert waterproof', what low-level action should happen next?\nOptions:\nA. Pull\nB. Twist\nC. Push\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Push" + } + ], + "images": [ + "images/2025-01-13-17-57-45_11135_hand.jpg", + "images/2025-01-13-17-57-45_11138_hand.jpg", + "images/2025-01-13-17-57-45_11141_hand.jpg", + "images/2025-01-13-17-57-45_11144_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Pick bolt 4', what low-level action should happen next?\nOptions:\nA. Approach\nB. Lift\nC. Push\nD. Pull\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Lift" + } + ], + "images": [ + "images/2025-01-13-17-57-45_11552_hand.jpg", + "images/2025-01-13-17-57-45_11555_hand.jpg", + "images/2025-01-13-17-57-45_11558_hand.jpg", + "images/2025-01-13-17-57-45_11561_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert bolt 4', what low-level action should happen next?\nOptions:\nA. Nudge\nB. Grasp\nC. Release\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Align" + } + ], + "images": [ + "images/2025-01-13-17-57-45_11761_hand.jpg", + "images/2025-01-13-17-57-45_11764_hand.jpg", + "images/2025-01-13-17-57-45_11767_hand.jpg", + "images/2025-01-13-17-57-45_11770_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert bolt 4', what low-level action should happen next?\nOptions:\nA. Twist\nB. Release\nC. Pull\nD. Nudge\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Twist" + } + ], + "images": [ + "images/2025-01-13-17-57-45_11929_hand.jpg", + "images/2025-01-13-17-57-45_11932_hand.jpg", + "images/2025-01-13-17-57-45_11935_hand.jpg", + "images/2025-01-13-17-57-45_11938_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert bolt 4', what low-level action should happen next?\nOptions:\nA. Align\nB. Approach\nC. Grasp\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Release" + } + ], + "images": [ + "images/2025-01-13-17-57-45_12086_hand.jpg", + "images/2025-01-13-17-57-45_12089_hand.jpg", + "images/2025-01-13-17-57-45_12092_hand.jpg", + "images/2025-01-13-17-57-45_12095_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Pick square peg 4', what low-level action should happen next?\nOptions:\nA. Release\nB. Twist\nC. Lift\nD. Pull\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Lift" + } + ], + "images": [ + "images/2025-01-13-17-57-45_12282_hand.jpg", + "images/2025-01-13-17-57-45_12285_hand.jpg", + "images/2025-01-13-17-57-45_12288_hand.jpg", + "images/2025-01-13-17-57-45_12291_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert square peg 4', what low-level action should happen next?\nOptions:\nA. Release\nB. Pull\nC. Push\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Align" + } + ], + "images": [ + "images/2025-01-13-17-57-45_12438_hand.jpg", + "images/2025-01-13-17-57-45_12441_hand.jpg", + "images/2025-01-13-17-57-45_12444_hand.jpg", + "images/2025-01-13-17-57-45_12447_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Place square peg 4', what low-level action should happen next?\nOptions:\nA. Push\nB. Nudge\nC. Release\nD. Pull\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Release" + } + ], + "images": [ + "images/2025-01-13-17-57-45_12654_hand.jpg", + "images/2025-01-13-17-57-45_12657_hand.jpg", + "images/2025-01-13-17-57-45_12660_hand.jpg", + "images/2025-01-13-17-57-45_12663_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Pick square peg 4', what low-level action should happen next?\nOptions:\nA. Twist\nB. Lift\nC. Approach\nD. Nudge\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Lift" + } + ], + "images": [ + "images/2025-01-13-17-57-45_12801_hand.jpg", + "images/2025-01-13-17-57-45_12804_hand.jpg", + "images/2025-01-13-17-57-45_12807_hand.jpg", + "images/2025-01-13-17-57-45_12810_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert square peg 4', what low-level action should happen next?\nOptions:\nA. Align\nB. Nudge\nC. Lift\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Align" + } + ], + "images": [ + "images/2025-01-13-17-57-45_12941_hand.jpg", + "images/2025-01-13-17-57-45_12944_hand.jpg", + "images/2025-01-13-17-57-45_12947_hand.jpg", + "images/2025-01-13-17-57-45_12950_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert square peg 4', what low-level action should happen next?\nOptions:\nA. Twist\nB. Release\nC. Grasp\nD. Push\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Release" + } + ], + "images": [ + "images/2025-01-13-17-57-45_13021_hand.jpg", + "images/2025-01-13-17-57-45_13024_hand.jpg", + "images/2025-01-13-17-57-45_13027_hand.jpg", + "images/2025-01-13-17-57-45_13030_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the high-level action 'Insert D-SUB', which action best fills [MASK] in this local chain: Approach > [MASK] > Release?\nOptions:\nA. Pull\nB. Lift\nC. Nudge\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Align" + } + ], + "images": [ + "images/2025-01-13-17-57-45_1910_hand.jpg", + "images/2025-01-13-17-57-45_1913_hand.jpg", + "images/2025-01-13-17-57-45_1916_hand.jpg", + "images/2025-01-13-17-57-45_1919_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T12", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the high-level action 'Insert BNC', which action best fills [MASK] in this local chain: Approach > [MASK] > Twist?\nOptions:\nA. Twist\nB. Align\nC. Lift\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Align" + } + ], + "images": [ + "images/2025-01-13-17-57-45_2600_hand.jpg", + "images/2025-01-13-17-57-45_2603_hand.jpg", + "images/2025-01-13-17-57-45_2606_hand.jpg", + "images/2025-01-13-17-57-45_2609_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T12", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the high-level action 'Insert BNC', which action best fills [MASK] in this local chain: Align > [MASK] > Release?\nOptions:\nA. Push\nB. Twist\nC. Release\nD. Approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Twist" + } + ], + "images": [ + "images/2025-01-13-17-57-45_2804_hand.jpg", + "images/2025-01-13-17-57-45_2807_hand.jpg", + "images/2025-01-13-17-57-45_2810_hand.jpg", + "images/2025-01-13-17-57-45_2813_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T12", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the high-level action 'Insert Ethernet', which action best fills [MASK] in this local chain: Approach > [MASK] > Release?\nOptions:\nA. Lift\nB. Release\nC. Align\nD. Grasp\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Align" + } + ], + "images": [ + "images/2025-01-13-17-57-45_3990_hand.jpg", + "images/2025-01-13-17-57-45_3993_hand.jpg", + "images/2025-01-13-17-57-45_3996_hand.jpg", + "images/2025-01-13-17-57-45_3999_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T12", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the high-level action 'Insert large gear', which action best fills [MASK] in this local chain: Approach > [MASK] > Push?\nOptions:\nA. Pull\nB. Approach\nC. Align\nD. Nudge\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Align" + } + ], + "images": [ + "images/2025-01-13-17-57-45_4470_hand.jpg", + "images/2025-01-13-17-57-45_4473_hand.jpg", + "images/2025-01-13-17-57-45_4476_hand.jpg", + "images/2025-01-13-17-57-45_4479_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T12", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the high-level action 'Insert medium gear', which action best fills [MASK] in this local chain: Approach > [MASK] > Release?\nOptions:\nA. Nudge\nB. Approach\nC. Lift\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Align" + } + ], + "images": [ + "images/2025-01-13-17-57-45_5021_hand.jpg", + "images/2025-01-13-17-57-45_5024_hand.jpg", + "images/2025-01-13-17-57-45_5027_hand.jpg", + "images/2025-01-13-17-57-45_5030_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T12", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the high-level action 'Insert small gear', which action best fills [MASK] in this local chain: Approach > [MASK] > Push?\nOptions:\nA. Pull\nB. Approach\nC. Align\nD. Lift\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Align" + } + ], + "images": [ + "images/2025-01-13-17-57-45_5516_hand.jpg", + "images/2025-01-13-17-57-45_5519_hand.jpg", + "images/2025-01-13-17-57-45_5522_hand.jpg", + "images/2025-01-13-17-57-45_5525_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T12", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the high-level action 'Insert USB', which action best fills [MASK] in this local chain: Approach > [MASK] > Release?\nOptions:\nA. Push\nB. Approach\nC. Align\nD. Grasp\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Align" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6295_hand.jpg", + "images/2025-01-13-17-57-45_6298_hand.jpg", + "images/2025-01-13-17-57-45_6301_hand.jpg", + "images/2025-01-13-17-57-45_6304_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T12", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the high-level action 'Insert round peg 4', which action best fills [MASK] in this local chain: Approach > [MASK] > Release?\nOptions:\nA. Align\nB. Nudge\nC. Push\nD. Approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Align" + } + ], + "images": [ + "images/2025-01-13-17-57-45_6710_hand.jpg", + "images/2025-01-13-17-57-45_6713_hand.jpg", + "images/2025-01-13-17-57-45_6716_hand.jpg", + "images/2025-01-13-17-57-45_6719_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T12", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the high-level action 'Insert round peg 3', which action best fills [MASK] in this local chain: Approach > [MASK] > Release?\nOptions:\nA. Align\nB. Twist\nC. Lift\nD. Approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Align" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7076_hand.jpg", + "images/2025-01-13-17-57-45_7079_hand.jpg", + "images/2025-01-13-17-57-45_7082_hand.jpg", + "images/2025-01-13-17-57-45_7085_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T12", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the high-level action 'Insert round peg 2', which action best fills [MASK] in this local chain: Approach > [MASK] > Release?\nOptions:\nA. Twist\nB. Grasp\nC. Pull\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Align" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7486_hand.jpg", + "images/2025-01-13-17-57-45_7489_hand.jpg", + "images/2025-01-13-17-57-45_7492_hand.jpg", + "images/2025-01-13-17-57-45_7495_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T12", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the high-level action 'Insert round peg 1', which action best fills [MASK] in this local chain: Approach > [MASK] > Release?\nOptions:\nA. Align\nB. Grasp\nC. Pull\nD. Lift\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Align" + } + ], + "images": [ + "images/2025-01-13-17-57-45_7849_hand.jpg", + "images/2025-01-13-17-57-45_7852_hand.jpg", + "images/2025-01-13-17-57-45_7855_hand.jpg", + "images/2025-01-13-17-57-45_7858_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T12", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the high-level action 'Insert square peg 1', which action best fills [MASK] in this local chain: Approach > [MASK] > Release?\nOptions:\nA. Twist\nB. Release\nC. Align\nD. Nudge\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Align" + } + ], + "images": [ + "images/2025-01-13-17-57-45_8515_hand.jpg", + "images/2025-01-13-17-57-45_8518_hand.jpg", + "images/2025-01-13-17-57-45_8521_hand.jpg", + "images/2025-01-13-17-57-45_8524_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T12", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the high-level action 'Insert square peg 2', which action best fills [MASK] in this local chain: Approach > [MASK] > Release?\nOptions:\nA. Release\nB. Lift\nC. Twist\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Align" + } + ], + "images": [ + "images/2025-01-13-17-57-45_9148_hand.jpg", + "images/2025-01-13-17-57-45_9151_hand.jpg", + "images/2025-01-13-17-57-45_9154_hand.jpg", + "images/2025-01-13-17-57-45_9157_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T12", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the high-level action 'Insert square peg 3', which action best fills [MASK] in this local chain: Approach > [MASK] > Release?\nOptions:\nA. Push\nB. Release\nC. Grasp\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Align" + } + ], + "images": [ + "images/2025-01-13-17-57-45_10286_hand.jpg", + "images/2025-01-13-17-57-45_10289_hand.jpg", + "images/2025-01-13-17-57-45_10292_hand.jpg", + "images/2025-01-13-17-57-45_10295_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T12", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the high-level action 'Insert waterproof', which action best fills [MASK] in this local chain: Approach > [MASK] > Push?\nOptions:\nA. Approach\nB. Nudge\nC. Align\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Align" + } + ], + "images": [ + "images/2025-01-13-17-57-45_11124_hand.jpg", + "images/2025-01-13-17-57-45_11127_hand.jpg", + "images/2025-01-13-17-57-45_11130_hand.jpg", + "images/2025-01-13-17-57-45_11133_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T12", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the high-level action 'Insert bolt 4', which action best fills [MASK] in this local chain: Approach > [MASK] > Twist?\nOptions:\nA. Align\nB. Pull\nC. Lift\nD. Grasp\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Align" + } + ], + "images": [ + "images/2025-01-13-17-57-45_11903_hand.jpg", + "images/2025-01-13-17-57-45_11906_hand.jpg", + "images/2025-01-13-17-57-45_11909_hand.jpg", + "images/2025-01-13-17-57-45_11912_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T12", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the high-level action 'Insert bolt 4', which action best fills [MASK] in this local chain: Align > [MASK] > Release?\nOptions:\nA. Twist\nB. Pull\nC. Grasp\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Twist" + } + ], + "images": [ + "images/2025-01-13-17-57-45_12058_hand.jpg", + "images/2025-01-13-17-57-45_12061_hand.jpg", + "images/2025-01-13-17-57-45_12064_hand.jpg", + "images/2025-01-13-17-57-45_12067_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T12", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the high-level action 'Insert square peg 4', which action best fills [MASK] in this local chain: Approach > [MASK] > Release?\nOptions:\nA. Pull\nB. Align\nC. Lift\nD. Push\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Align" + } + ], + "images": [ + "images/2025-01-13-17-57-45_13010_hand.jpg", + "images/2025-01-13-17-57-45_13013_hand.jpg", + "images/2025-01-13-17-57-45_13016_hand.jpg", + "images/2025-01-13-17-57-45_13019_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T12", + "task_id": "2025-01-13-17-57-45", + "split": "eval", + "split_group_id": "2025-01-13-17-57-45", + "recording_id": "2025-01-13-17-57-45", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. pre-approach\nC. hold and carry\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
pre-approach" + } + ], + "images": [ + "images/2025-01-14-11-53-50_44_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. pre-approach\nC. contact\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_169_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. contact\nC. release\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. hold and carry" + } + ], + "images": [ + "images/2025-01-14-11-53-50_262_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. release\nC. pre-approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
transfer" + } + ], + "images": [ + "images/2025-01-14-11-53-50_402_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. transfer\nC. approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_637_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. approach\nC. pre-approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. release" + } + ], + "images": [ + "images/2025-01-14-11-53-50_753_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. transfer\nC. contact\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_877_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. pre-approach\nC. release\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. hold and carry" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1005_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. contact\nC. pre-approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. transfer" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1084_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. pre-approach\nC. contact\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1199_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. approach\nC. hold and carry\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1279_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. release\nC. pre-approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. release" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1317_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. approach\nC. contact\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1390_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. release\nC. transfer\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. hold and carry" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1472_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. transfer\nC. contact\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. transfer" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1546_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. hold and carry\nC. approach\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
release" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1613_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. release\nC. contact\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1697_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. hold and carry\nC. contact\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. hold and carry" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1782_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. pre-approach\nC. transfer\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
transfer" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1897_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. pre-approach\nC. release\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_2026_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. approach\nC. contact\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_2123_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. pre-approach\nC. transfer\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_2264_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. transfer\nC. contact\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. hold and carry" + } + ], + "images": [ + "images/2025-01-14-11-53-50_2344_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. approach\nC. contact\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. transfer" + } + ], + "images": [ + "images/2025-01-14-11-53-50_2405_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. release\nC. contact\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_2582_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. pre-approach\nC. contact\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_2756_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. approach\nC. release\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_2897_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. release\nC. hold and carry\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
hold and carry" + } + ], + "images": [ + "images/2025-01-14-11-53-50_2996_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. contact\nC. hold and carry\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. transfer" + } + ], + "images": [ + "images/2025-01-14-11-53-50_3010_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. transfer\nC. contact\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. transfer" + } + ], + "images": [ + "images/2025-01-14-11-53-50_3107_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. pre-approach\nC. approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_3372_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. contact\nC. approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_3595_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. contact\nC. hold and carry\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_3688_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. hold and carry\nC. contact\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_3800_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. pre-approach\nC. release\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_3915_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. transfer\nC. contact\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_4053_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. contact\nC. pre-approach\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
hold and carry" + } + ], + "images": [ + "images/2025-01-14-11-53-50_4154_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. release\nC. hold and carry\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. transfer" + } + ], + "images": [ + "images/2025-01-14-11-53-50_4266_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. release\nC. approach\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_4512_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. approach\nC. hold and carry\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_4737_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. release\nC. contact\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. release" + } + ], + "images": [ + "images/2025-01-14-11-53-50_4832_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. approach\nC. release\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_4950_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. contact\nC. approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_5112_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. pre-approach\nC. release\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. hold and carry" + } + ], + "images": [ + "images/2025-01-14-11-53-50_5179_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. release\nC. transfer\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. transfer" + } + ], + "images": [ + "images/2025-01-14-11-53-50_5263_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. contact\nC. transfer\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_5494_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. transfer\nC. release\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. transfer" + } + ], + "images": [ + "images/2025-01-14-11-53-50_5736_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. hold and carry\nC. approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_5857_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. hold and carry\nC. pre-approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
release" + } + ], + "images": [ + "images/2025-01-14-11-53-50_5911_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. contact\nC. pre-approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6008_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. approach\nC. pre-approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6142_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. hold and carry\nC. pre-approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
hold and carry" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6209_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. contact\nC. release\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. transfer" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6319_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. contact\nC. release\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6436_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. pre-approach\nC. transfer\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
release" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6471_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. transfer\nC. contact\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6571_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. pre-approach\nC. hold and carry\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. hold and carry" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6675_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. contact\nC. pre-approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
transfer" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6791_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. transfer\nC. contact\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6947_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. transfer\nC. pre-approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. release" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7016_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. contact\nC. transfer\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7114_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. transfer\nC. pre-approach\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. hold and carry" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7208_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. approach\nC. transfer\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. transfer" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7276_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. approach\nC. hold and carry\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
release" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7342_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. contact\nC. release\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7412_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. release\nC. transfer\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. hold and carry" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7482_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. transfer\nC. hold and carry\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
transfer" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7540_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. release\nC. approach\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. release" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7592_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. approach\nC. contact\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7659_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. release\nC. hold and carry\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
hold and carry" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7726_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. release\nC. pre-approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. transfer" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7818_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. hold and carry\nC. contact\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7932_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. pre-approach\nC. approach\nD. contact\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "D. contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7978_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. approach\nC. transfer\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. release" + } + ], + "images": [ + "images/2025-01-14-11-53-50_8005_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. hold and carry\nC. release\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_8085_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. hold and carry\nC. contact\nD. pre-approach\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "B. hold and carry" + } + ], + "images": [ + "images/2025-01-14-11-53-50_8166_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. pre-approach\nC. release\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. transfer" + } + ], + "images": [ + "images/2025-01-14-11-53-50_8233_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. approach\nC. release\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. release" + } + ], + "images": [ + "images/2025-01-14-11-53-50_8306_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. hold and carry\nC. release\nD. transfer\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "A. contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_8389_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. contact\nC. pre-approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. hold and carry" + } + ], + "images": [ + "images/2025-01-14-11-53-50_8464_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. transfer\nC. release\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. transfer" + } + ], + "images": [ + "images/2025-01-14-11-53-50_8587_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. contact\nC. release\nD. pre-approach\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "B. contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_8855_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. transfer\nC. release\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_9102_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. contact\nC. transfer\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_9331_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. release\nC. approach\nD. hold and carry\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "D. hold and carry" + } + ], + "images": [ + "images/2025-01-14-11-53-50_9478_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. release\nC. contact\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. transfer" + } + ], + "images": [ + "images/2025-01-14-11-53-50_9550_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. transfer\nC. release\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. release" + } + ], + "images": [ + "images/2025-01-14-11-53-50_9618_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. approach\nC. pre-approach\nD. contact\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "D. contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_9710_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. approach\nC. pre-approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. hold and carry" + } + ], + "images": [ + "images/2025-01-14-11-53-50_9803_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. contact\nC. approach\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. transfer" + } + ], + "images": [ + "images/2025-01-14-11-53-50_9967_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. transfer\nC. approach\nD. pre-approach\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "A. contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_10183_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. contact\nC. approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. transfer" + } + ], + "images": [ + "images/2025-01-14-11-53-50_10310_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. hold and carry\nC. transfer\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. release" + } + ], + "images": [ + "images/2025-01-14-11-53-50_10380_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. approach\nC. contact\nD. release\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "C. contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_10496_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. contact\nC. pre-approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. hold and carry" + } + ], + "images": [ + "images/2025-01-14-11-53-50_10619_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. transfer\nC. release\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. transfer" + } + ], + "images": [ + "images/2025-01-14-11-53-50_10697_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. hold and carry\nC. approach\nD. transfer\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "A. contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_10777_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. contact\nC. release\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. release" + } + ], + "images": [ + "images/2025-01-14-11-53-50_10800_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. approach\nC. transfer\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_10912_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. approach\nC. transfer\nD. release\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "A. hold and carry" + } + ], + "images": [ + "images/2025-01-14-11-53-50_11034_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. pre-approach\nC. transfer\nD. approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. transfer" + } + ], + "images": [ + "images/2025-01-14-11-53-50_11174_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. release\nC. approach\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_11336_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. transfer\nC. contact\nD. release\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "B. transfer" + } + ], + "images": [ + "images/2025-01-14-11-53-50_11464_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. contact\nC. approach\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_11563_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. approach\nC. contact\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. release" + } + ], + "images": [ + "images/2025-01-14-11-53-50_11586_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. transfer\nC. release\nD. contact\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "D. contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_11679_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. transfer\nC. pre-approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. hold and carry" + } + ], + "images": [ + "images/2025-01-14-11-53-50_11775_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. pre-approach\nC. hold and carry\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. transfer" + } + ], + "images": [ + "images/2025-01-14-11-53-50_11870_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. approach\nC. pre-approach\nD. release\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "A. contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_11981_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. hold and carry\nC. transfer\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. release" + } + ], + "images": [ + "images/2025-01-14-11-53-50_12018_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. hold and carry\nC. pre-approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_12100_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. transfer\nC. contact\nD. hold and carry\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "D. hold and carry" + } + ], + "images": [ + "images/2025-01-14-11-53-50_12180_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. approach\nC. hold and carry\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. transfer" + } + ], + "images": [ + "images/2025-01-14-11-53-50_12322_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. approach\nC. pre-approach\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_12854_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. transfer\nC. pre-approach\nD. release\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "B. transfer" + } + ], + "images": [ + "images/2025-01-14-11-53-50_13368_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. pre-approach\nC. release\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_13570_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. pre-approach\nC. transfer\nD. hold and carry\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. release" + } + ], + "images": [ + "images/2025-01-14-11-53-50_13677_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. contact\nB. transfer\nC. pre-approach\nD. approach\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "A. contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_13775_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. approach\nC. transfer\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. hold and carry" + } + ], + "images": [ + "images/2025-01-14-11-53-50_13866_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. approach\nC. transfer\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. transfer" + } + ], + "images": [ + "images/2025-01-14-11-53-50_13985_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. pre-approach\nC. contact\nD. hold and carry\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "C. contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_14144_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. release\nB. contact\nC. hold and carry\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. release" + } + ], + "images": [ + "images/2025-01-14-11-53-50_14205_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. transfer\nC. contact\nD. pre-approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_14312_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. hold and carry\nB. pre-approach\nC. transfer\nD. contact\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "A. hold and carry" + } + ], + "images": [ + "images/2025-01-14-11-53-50_14419_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. contact\nC. release\nD. transfer\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. transfer" + } + ], + "images": [ + "images/2025-01-14-11-53-50_14503_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. approach\nC. hold and carry\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_14616_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. approach\nB. contact\nC. transfer\nD. pre-approach\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "B. contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_14717_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. pre-approach\nB. release\nC. transfer\nD. contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. release" + } + ], + "images": [ + "images/2025-01-14-11-53-50_14792_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nWhich phase best describes the current robot manipulation state?\nOptions:\nA. transfer\nB. contact\nC. pre-approach\nD. release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. pre-approach" + } + ], + "images": [ + "images/2025-01-14-11-53-50_14832_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T1", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_44_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_169_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_262_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_402_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_637_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_753_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_877_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1005_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1084_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1199_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1279_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1317_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1390_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1472_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1546_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1613_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1697_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1782_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1897_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_2026_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_2123_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_2264_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_2344_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_2405_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_2582_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_2756_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_2897_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_2996_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_3010_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_3107_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_3372_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_3595_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_3688_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_3800_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_3915_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_4053_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_4154_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_4266_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_4512_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_4737_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_4832_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_4950_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_5112_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_5179_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_5263_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_5494_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_5736_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_5857_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_5911_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6008_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6142_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6209_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6319_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6436_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6471_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6571_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6675_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6791_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6947_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7016_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7114_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7208_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7276_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7342_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7412_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7482_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7540_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7592_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7659_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7726_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7818_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7932_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7978_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_8005_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_8085_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_8166_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_8233_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_8306_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_8389_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_8464_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_8587_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_8855_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_9102_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_9331_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_9478_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_9550_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_9618_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_9710_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_9803_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_9967_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_10183_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_10310_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_10380_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_10496_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_10619_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_10697_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_10777_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_10800_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_10912_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_11034_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_11174_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_11336_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_11464_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_11563_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_11586_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_11679_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_11775_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_11870_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_11981_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_12018_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_12100_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_12180_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_12322_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_12854_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "B. No — no contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_13368_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_13570_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_13677_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_13775_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_13866_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_13985_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_14144_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_14205_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_14312_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_14419_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_14503_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_14616_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "B. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_14717_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. Yes — contact established\nB. No — no contact\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — contact established" + } + ], + "images": [ + "images/2025-01-14-11-53-50_14792_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 1\nIs the end effector currently in contact with anything?\nOptions:\nA. No — no contact\nB. Yes — contact established\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — no contact" + } + ], + "images": [ + "images/2025-01-14-11-53-50_14832_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T2", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. 
Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_17_hand.jpg", + "images/2025-01-14-11-53-50_30_hand.jpg", + "images/2025-01-14-11-53-50_44_hand.jpg", + "images/2025-01-14-11-53-50_58_hand.jpg", + "images/2025-01-14-11-53-50_71_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_255_hand.jpg", + "images/2025-01-14-11-53-50_258_hand.jpg", + "images/2025-01-14-11-53-50_262_hand.jpg", + "images/2025-01-14-11-53-50_266_hand.jpg", + "images/2025-01-14-11-53-50_269_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." 
+ }, + { + "from": "gpt", + "value": "A. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_571_hand.jpg", + "images/2025-01-14-11-53-50_604_hand.jpg", + "images/2025-01-14-11-53-50_638_hand.jpg", + "images/2025-01-14-11-53-50_671_hand.jpg", + "images/2025-01-14-11-53-50_704_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_805_hand.jpg", + "images/2025-01-14-11-53-50_841_hand.jpg", + "images/2025-01-14-11-53-50_877_hand.jpg", + "images/2025-01-14-11-53-50_913_hand.jpg", + "images/2025-01-14-11-53-50_949_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1168_hand.jpg", + "images/2025-01-14-11-53-50_1184_hand.jpg", + "images/2025-01-14-11-53-50_1199_hand.jpg", + "images/2025-01-14-11-53-50_1214_hand.jpg", + "images/2025-01-14-11-53-50_1230_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1260_hand.jpg", + "images/2025-01-14-11-53-50_1270_hand.jpg", + "images/2025-01-14-11-53-50_1280_hand.jpg", + "images/2025-01-14-11-53-50_1289_hand.jpg", + "images/2025-01-14-11-53-50_1299_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1510_hand.jpg", + "images/2025-01-14-11-53-50_1528_hand.jpg", + "images/2025-01-14-11-53-50_1546_hand.jpg", + "images/2025-01-14-11-53-50_1564_hand.jpg", + "images/2025-01-14-11-53-50_1582_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1833_hand.jpg", + "images/2025-01-14-11-53-50_1865_hand.jpg", + "images/2025-01-14-11-53-50_1897_hand.jpg", + "images/2025-01-14-11-53-50_1929_hand.jpg", + "images/2025-01-14-11-53-50_1961_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_2009_hand.jpg", + "images/2025-01-14-11-53-50_2018_hand.jpg", + "images/2025-01-14-11-53-50_2026_hand.jpg", + "images/2025-01-14-11-53-50_2035_hand.jpg", + "images/2025-01-14-11-53-50_2044_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_2505_hand.jpg", + "images/2025-01-14-11-53-50_2544_hand.jpg", + "images/2025-01-14-11-53-50_2582_hand.jpg", + "images/2025-01-14-11-53-50_2621_hand.jpg", + "images/2025-01-14-11-53-50_2660_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_3266_hand.jpg", + "images/2025-01-14-11-53-50_3319_hand.jpg", + "images/2025-01-14-11-53-50_3372_hand.jpg", + "images/2025-01-14-11-53-50_3425_hand.jpg", + "images/2025-01-14-11-53-50_3478_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_3754_hand.jpg", + "images/2025-01-14-11-53-50_3777_hand.jpg", + "images/2025-01-14-11-53-50_3800_hand.jpg", + "images/2025-01-14-11-53-50_3823_hand.jpg", + "images/2025-01-14-11-53-50_3846_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_3994_hand.jpg", + "images/2025-01-14-11-53-50_4023_hand.jpg", + "images/2025-01-14-11-53-50_4053_hand.jpg", + "images/2025-01-14-11-53-50_4083_hand.jpg", + "images/2025-01-14-11-53-50_4112_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_4200_hand.jpg", + "images/2025-01-14-11-53-50_4233_hand.jpg", + "images/2025-01-14-11-53-50_4266_hand.jpg", + "images/2025-01-14-11-53-50_4300_hand.jpg", + "images/2025-01-14-11-53-50_4333_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_4424_hand.jpg", + "images/2025-01-14-11-53-50_4468_hand.jpg", + "images/2025-01-14-11-53-50_4512_hand.jpg", + "images/2025-01-14-11-53-50_4557_hand.jpg", + "images/2025-01-14-11-53-50_4601_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_4685_hand.jpg", + "images/2025-01-14-11-53-50_4711_hand.jpg", + "images/2025-01-14-11-53-50_4738_hand.jpg", + "images/2025-01-14-11-53-50_4764_hand.jpg", + "images/2025-01-14-11-53-50_4790_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_4884_hand.jpg", + "images/2025-01-14-11-53-50_4917_hand.jpg", + "images/2025-01-14-11-53-50_4950_hand.jpg", + "images/2025-01-14-11-53-50_4984_hand.jpg", + "images/2025-01-14-11-53-50_5017_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_5220_hand.jpg", + "images/2025-01-14-11-53-50_5242_hand.jpg", + "images/2025-01-14-11-53-50_5263_hand.jpg", + "images/2025-01-14-11-53-50_5284_hand.jpg", + "images/2025-01-14-11-53-50_5306_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_5392_hand.jpg", + "images/2025-01-14-11-53-50_5443_hand.jpg", + "images/2025-01-14-11-53-50_5494_hand.jpg", + "images/2025-01-14-11-53-50_5545_hand.jpg", + "images/2025-01-14-11-53-50_5596_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_5830_hand.jpg", + "images/2025-01-14-11-53-50_5844_hand.jpg", + "images/2025-01-14-11-53-50_5857_hand.jpg", + "images/2025-01-14-11-53-50_5870_hand.jpg", + "images/2025-01-14-11-53-50_5884_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_5955_hand.jpg", + "images/2025-01-14-11-53-50_5981_hand.jpg", + "images/2025-01-14-11-53-50_6008_hand.jpg", + "images/2025-01-14-11-53-50_6035_hand.jpg", + "images/2025-01-14-11-53-50_6061_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6261_hand.jpg", + "images/2025-01-14-11-53-50_6290_hand.jpg", + "images/2025-01-14-11-53-50_6319_hand.jpg", + "images/2025-01-14-11-53-50_6348_hand.jpg", + "images/2025-01-14-11-53-50_6377_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6421_hand.jpg", + "images/2025-01-14-11-53-50_6429_hand.jpg", + "images/2025-01-14-11-53-50_6436_hand.jpg", + "images/2025-01-14-11-53-50_6444_hand.jpg", + "images/2025-01-14-11-53-50_6452_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6515_hand.jpg", + "images/2025-01-14-11-53-50_6543_hand.jpg", + "images/2025-01-14-11-53-50_6571_hand.jpg", + "images/2025-01-14-11-53-50_6599_hand.jpg", + "images/2025-01-14-11-53-50_6627_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6728_hand.jpg", + "images/2025-01-14-11-53-50_6760_hand.jpg", + "images/2025-01-14-11-53-50_6792_hand.jpg", + "images/2025-01-14-11-53-50_6823_hand.jpg", + "images/2025-01-14-11-53-50_6855_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6914_hand.jpg", + "images/2025-01-14-11-53-50_6931_hand.jpg", + "images/2025-01-14-11-53-50_6948_hand.jpg", + "images/2025-01-14-11-53-50_6964_hand.jpg", + "images/2025-01-14-11-53-50_6981_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7062_hand.jpg", + "images/2025-01-14-11-53-50_7088_hand.jpg", + "images/2025-01-14-11-53-50_7114_hand.jpg", + "images/2025-01-14-11-53-50_7140_hand.jpg", + "images/2025-01-14-11-53-50_7166_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7589_hand.jpg", + "images/2025-01-14-11-53-50_7591_hand.jpg", + "images/2025-01-14-11-53-50_7592_hand.jpg", + "images/2025-01-14-11-53-50_7594_hand.jpg", + "images/2025-01-14-11-53-50_7596_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7766_hand.jpg", + "images/2025-01-14-11-53-50_7792_hand.jpg", + "images/2025-01-14-11-53-50_7818_hand.jpg", + "images/2025-01-14-11-53-50_7845_hand.jpg", + "images/2025-01-14-11-53-50_7871_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7914_hand.jpg", + "images/2025-01-14-11-53-50_7923_hand.jpg", + "images/2025-01-14-11-53-50_7932_hand.jpg", + "images/2025-01-14-11-53-50_7941_hand.jpg", + "images/2025-01-14-11-53-50_7950_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7968_hand.jpg", + "images/2025-01-14-11-53-50_7973_hand.jpg", + "images/2025-01-14-11-53-50_7978_hand.jpg", + "images/2025-01-14-11-53-50_7983_hand.jpg", + "images/2025-01-14-11-53-50_7988_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_8042_hand.jpg", + "images/2025-01-14-11-53-50_8064_hand.jpg", + "images/2025-01-14-11-53-50_8086_hand.jpg", + "images/2025-01-14-11-53-50_8107_hand.jpg", + "images/2025-01-14-11-53-50_8129_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_8516_hand.jpg", + "images/2025-01-14-11-53-50_8552_hand.jpg", + "images/2025-01-14-11-53-50_8588_hand.jpg", + "images/2025-01-14-11-53-50_8623_hand.jpg", + "images/2025-01-14-11-53-50_8659_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_8759_hand.jpg", + "images/2025-01-14-11-53-50_8807_hand.jpg", + "images/2025-01-14-11-53-50_8856_hand.jpg", + "images/2025-01-14-11-53-50_8904_hand.jpg", + "images/2025-01-14-11-53-50_8952_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_9512_hand.jpg", + "images/2025-01-14-11-53-50_9531_hand.jpg", + "images/2025-01-14-11-53-50_9550_hand.jpg", + "images/2025-01-14-11-53-50_9569_hand.jpg", + "images/2025-01-14-11-53-50_9588_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_9871_hand.jpg", + "images/2025-01-14-11-53-50_9919_hand.jpg", + "images/2025-01-14-11-53-50_9967_hand.jpg", + "images/2025-01-14-11-53-50_10015_hand.jpg", + "images/2025-01-14-11-53-50_10063_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_10143_hand.jpg", + "images/2025-01-14-11-53-50_10163_hand.jpg", + "images/2025-01-14-11-53-50_10183_hand.jpg", + "images/2025-01-14-11-53-50_10203_hand.jpg", + "images/2025-01-14-11-53-50_10223_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_10428_hand.jpg", + "images/2025-01-14-11-53-50_10462_hand.jpg", + "images/2025-01-14-11-53-50_10496_hand.jpg", + "images/2025-01-14-11-53-50_10531_hand.jpg", + "images/2025-01-14-11-53-50_10565_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_10768_hand.jpg", + "images/2025-01-14-11-53-50_10772_hand.jpg", + "images/2025-01-14-11-53-50_10777_hand.jpg", + "images/2025-01-14-11-53-50_10782_hand.jpg", + "images/2025-01-14-11-53-50_10786_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_11411_hand.jpg", + "images/2025-01-14-11-53-50_11437_hand.jpg", + "images/2025-01-14-11-53-50_11464_hand.jpg", + "images/2025-01-14-11-53-50_11491_hand.jpg", + "images/2025-01-14-11-53-50_11517_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_11555_hand.jpg", + "images/2025-01-14-11-53-50_11559_hand.jpg", + "images/2025-01-14-11-53-50_11563_hand.jpg", + "images/2025-01-14-11-53-50_11567_hand.jpg", + "images/2025-01-14-11-53-50_11571_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_11627_hand.jpg", + "images/2025-01-14-11-53-50_11653_hand.jpg", + "images/2025-01-14-11-53-50_11680_hand.jpg", + "images/2025-01-14-11-53-50_11706_hand.jpg", + "images/2025-01-14-11-53-50_11732_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_11964_hand.jpg", + "images/2025-01-14-11-53-50_11973_hand.jpg", + "images/2025-01-14-11-53-50_11981_hand.jpg", + "images/2025-01-14-11-53-50_11989_hand.jpg", + "images/2025-01-14-11-53-50_11998_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_12056_hand.jpg", + "images/2025-01-14-11-53-50_12078_hand.jpg", + "images/2025-01-14-11-53-50_12100_hand.jpg", + "images/2025-01-14-11-53-50_12122_hand.jpg", + "images/2025-01-14-11-53-50_12144_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_12175_hand.jpg", + "images/2025-01-14-11-53-50_12178_hand.jpg", + "images/2025-01-14-11-53-50_12180_hand.jpg", + "images/2025-01-14-11-53-50_12183_hand.jpg", + "images/2025-01-14-11-53-50_12186_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Actively moving (the arm is clearly in motion)\nB. Stationary (the arm is still or barely moving)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_12603_hand.jpg", + "images/2025-01-14-11-53-50_12728_hand.jpg", + "images/2025-01-14-11-53-50_12854_hand.jpg", + "images/2025-01-14-11-53-50_12980_hand.jpg", + "images/2025-01-14-11-53-50_13105_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_13516_hand.jpg", + "images/2025-01-14-11-53-50_13543_hand.jpg", + "images/2025-01-14-11-53-50_13570_hand.jpg", + "images/2025-01-14-11-53-50_13598_hand.jpg", + "images/2025-01-14-11-53-50_13625_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_13726_hand.jpg", + "images/2025-01-14-11-53-50_13751_hand.jpg", + "images/2025-01-14-11-53-50_13776_hand.jpg", + "images/2025-01-14-11-53-50_13800_hand.jpg", + "images/2025-01-14-11-53-50_13825_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_13919_hand.jpg", + "images/2025-01-14-11-53-50_13952_hand.jpg", + "images/2025-01-14-11-53-50_13986_hand.jpg", + "images/2025-01-14-11-53-50_14019_hand.jpg", + "images/2025-01-14-11-53-50_14052_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_14111_hand.jpg", + "images/2025-01-14-11-53-50_14128_hand.jpg", + "images/2025-01-14-11-53-50_14144_hand.jpg", + "images/2025-01-14-11-53-50_14161_hand.jpg", + "images/2025-01-14-11-53-50_14178_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_14201_hand.jpg", + "images/2025-01-14-11-53-50_14203_hand.jpg", + "images/2025-01-14-11-53-50_14205_hand.jpg", + "images/2025-01-14-11-53-50_14207_hand.jpg", + "images/2025-01-14-11-53-50_14209_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_14250_hand.jpg", + "images/2025-01-14-11-53-50_14281_hand.jpg", + "images/2025-01-14-11-53-50_14312_hand.jpg", + "images/2025-01-14-11-53-50_14344_hand.jpg", + "images/2025-01-14-11-53-50_14375_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_14593_hand.jpg", + "images/2025-01-14-11-53-50_14605_hand.jpg", + "images/2025-01-14-11-53-50_14616_hand.jpg", + "images/2025-01-14-11-53-50_14628_hand.jpg", + "images/2025-01-14-11-53-50_14640_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the arm is still or barely moving)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_14678_hand.jpg", + "images/2025-01-14-11-53-50_14698_hand.jpg", + "images/2025-01-14-11-53-50_14718_hand.jpg", + "images/2025-01-14-11-53-50_14737_hand.jpg", + "images/2025-01-14-11-53-50_14757_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the robot arm actively moving or stationary?\nOptions:\nA. Stationary (the arm is still or barely moving)\nB. Actively moving (the arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the arm is clearly in motion)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_14815_hand.jpg", + "images/2025-01-14-11-53-50_14824_hand.jpg", + "images/2025-01-14-11-53-50_14832_hand.jpg", + "images/2025-01-14-11-53-50_14841_hand.jpg", + "images/2025-01-14-11-53-50_14850_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T6", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-14-11-53-50_111_hand.jpg", + "images/2025-01-14-11-53-50_125_hand.jpg", + "images/2025-01-14-11-53-50_139_hand.jpg", + "images/2025-01-14-11-53-50_153_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-14-11-53-50_335_hand.jpg", + "images/2025-01-14-11-53-50_371_hand.jpg", + "images/2025-01-14-11-53-50_408_hand.jpg", + "images/2025-01-14-11-53-50_444_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-14-11-53-50_794_hand.jpg", + "images/2025-01-14-11-53-50_813_hand.jpg", + "images/2025-01-14-11-53-50_832_hand.jpg", + "images/2025-01-14-11-53-50_851_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1058_hand.jpg", + "images/2025-01-14-11-53-50_1080_hand.jpg", + "images/2025-01-14-11-53-50_1103_hand.jpg", + "images/2025-01-14-11-53-50_1126_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1344_hand.jpg", + "images/2025-01-14-11-53-50_1357_hand.jpg", + "images/2025-01-14-11-53-50_1369_hand.jpg", + "images/2025-01-14-11-53-50_1381_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1506_hand.jpg", + "images/2025-01-14-11-53-50_1516_hand.jpg", + "images/2025-01-14-11-53-50_1526_hand.jpg", + "images/2025-01-14-11-53-50_1536_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1646_hand.jpg", + "images/2025-01-14-11-53-50_1659_hand.jpg", + "images/2025-01-14-11-53-50_1672_hand.jpg", + "images/2025-01-14-11-53-50_1684_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1845_hand.jpg", + "images/2025-01-14-11-53-50_1875_hand.jpg", + "images/2025-01-14-11-53-50_1904_hand.jpg", + "images/2025-01-14-11-53-50_1934_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-14-11-53-50_2212_hand.jpg", + "images/2025-01-14-11-53-50_2224_hand.jpg", + "images/2025-01-14-11-53-50_2236_hand.jpg", + "images/2025-01-14-11-53-50_2248_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-14-11-53-50_2409_hand.jpg", + "images/2025-01-14-11-53-50_2443_hand.jpg", + "images/2025-01-14-11-53-50_2477_hand.jpg", + "images/2025-01-14-11-53-50_2511_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-14-11-53-50_2834_hand.jpg", + "images/2025-01-14-11-53-50_2848_hand.jpg", + "images/2025-01-14-11-53-50_2863_hand.jpg", + "images/2025-01-14-11-53-50_2878_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-14-11-53-50_3130_hand.jpg", + "images/2025-01-14-11-53-50_3201_hand.jpg", + "images/2025-01-14-11-53-50_3272_hand.jpg", + "images/2025-01-14-11-53-50_3343_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-14-11-53-50_3983_hand.jpg", + "images/2025-01-14-11-53-50_3998_hand.jpg", + "images/2025-01-14-11-53-50_4013_hand.jpg", + "images/2025-01-14-11-53-50_4029_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-14-11-53-50_4246_hand.jpg", + "images/2025-01-14-11-53-50_4297_hand.jpg", + "images/2025-01-14-11-53-50_4349_hand.jpg", + "images/2025-01-14-11-53-50_4400_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this operation will fail" + } + ], + "images": [ + "images/2025-01-14-11-53-50_4870_hand.jpg", + "images/2025-01-14-11-53-50_4886_hand.jpg", + "images/2025-01-14-11-53-50_4902_hand.jpg", + "images/2025-01-14-11-53-50_4918_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-14-11-53-50_5076_hand.jpg", + "images/2025-01-14-11-53-50_5086_hand.jpg", + "images/2025-01-14-11-53-50_5096_hand.jpg", + "images/2025-01-14-11-53-50_5106_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this operation will fail" + } + ], + "images": [ + "images/2025-01-14-11-53-50_5252_hand.jpg", + "images/2025-01-14-11-53-50_5287_hand.jpg", + "images/2025-01-14-11-53-50_5322_hand.jpg", + "images/2025-01-14-11-53-50_5356_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-14-11-53-50_5691_hand.jpg", + "images/2025-01-14-11-53-50_5711_hand.jpg", + "images/2025-01-14-11-53-50_5731_hand.jpg", + "images/2025-01-14-11-53-50_5751_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this operation will fail" + } + ], + "images": [ + "images/2025-01-14-11-53-50_5945_hand.jpg", + "images/2025-01-14-11-53-50_5957_hand.jpg", + "images/2025-01-14-11-53-50_5970_hand.jpg", + "images/2025-01-14-11-53-50_5983_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6109_hand.jpg", + "images/2025-01-14-11-53-50_6119_hand.jpg", + "images/2025-01-14-11-53-50_6129_hand.jpg", + "images/2025-01-14-11-53-50_6139_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6259_hand.jpg", + "images/2025-01-14-11-53-50_6278_hand.jpg", + "images/2025-01-14-11-53-50_6297_hand.jpg", + "images/2025-01-14-11-53-50_6316_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6508_hand.jpg", + "images/2025-01-14-11-53-50_6523_hand.jpg", + "images/2025-01-14-11-53-50_6539_hand.jpg", + "images/2025-01-14-11-53-50_6555_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6733_hand.jpg", + "images/2025-01-14-11-53-50_6758_hand.jpg", + "images/2025-01-14-11-53-50_6784_hand.jpg", + "images/2025-01-14-11-53-50_6809_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7055_hand.jpg", + "images/2025-01-14-11-53-50_7069_hand.jpg", + "images/2025-01-14-11-53-50_7083_hand.jpg", + "images/2025-01-14-11-53-50_7097_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7237_hand.jpg", + "images/2025-01-14-11-53-50_7247_hand.jpg", + "images/2025-01-14-11-53-50_7257_hand.jpg", + "images/2025-01-14-11-53-50_7267_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7371_hand.jpg", + "images/2025-01-14-11-53-50_7382_hand.jpg", + "images/2025-01-14-11-53-50_7392_hand.jpg", + "images/2025-01-14-11-53-50_7403_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7508_hand.jpg", + "images/2025-01-14-11-53-50_7515_hand.jpg", + "images/2025-01-14-11-53-50_7523_hand.jpg", + "images/2025-01-14-11-53-50_7531_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7616_hand.jpg", + "images/2025-01-14-11-53-50_7626_hand.jpg", + "images/2025-01-14-11-53-50_7636_hand.jpg", + "images/2025-01-14-11-53-50_7646_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7769_hand.jpg", + "images/2025-01-14-11-53-50_7791_hand.jpg", + "images/2025-01-14-11-53-50_7812_hand.jpg", + "images/2025-01-14-11-53-50_7833_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-14-11-53-50_8037_hand.jpg", + "images/2025-01-14-11-53-50_8049_hand.jpg", + "images/2025-01-14-11-53-50_8061_hand.jpg", + "images/2025-01-14-11-53-50_8073_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-14-11-53-50_8197_hand.jpg", + "images/2025-01-14-11-53-50_8207_hand.jpg", + "images/2025-01-14-11-53-50_8218_hand.jpg", + "images/2025-01-14-11-53-50_8229_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-14-11-53-50_8342_hand.jpg", + "images/2025-01-14-11-53-50_8353_hand.jpg", + "images/2025-01-14-11-53-50_8365_hand.jpg", + "images/2025-01-14-11-53-50_8376_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-14-11-53-50_8564_hand.jpg", + "images/2025-01-14-11-53-50_8618_hand.jpg", + "images/2025-01-14-11-53-50_8672_hand.jpg", + "images/2025-01-14-11-53-50_8726_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-14-11-53-50_9232_hand.jpg", + "images/2025-01-14-11-53-50_9254_hand.jpg", + "images/2025-01-14-11-53-50_9276_hand.jpg", + "images/2025-01-14-11-53-50_9298_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-14-11-53-50_9507_hand.jpg", + "images/2025-01-14-11-53-50_9517_hand.jpg", + "images/2025-01-14-11-53-50_9527_hand.jpg", + "images/2025-01-14-11-53-50_9538_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-14-11-53-50_9650_hand.jpg", + "images/2025-01-14-11-53-50_9664_hand.jpg", + "images/2025-01-14-11-53-50_9678_hand.jpg", + "images/2025-01-14-11-53-50_9692_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. No — this operation will fail" + } + ], + "images": [ + "images/2025-01-14-11-53-50_9868_hand.jpg", + "images/2025-01-14-11-53-50_9901_hand.jpg", + "images/2025-01-14-11-53-50_9933_hand.jpg", + "images/2025-01-14-11-53-50_9966_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-14-11-53-50_10265_hand.jpg", + "images/2025-01-14-11-53-50_10275_hand.jpg", + "images/2025-01-14-11-53-50_10286_hand.jpg", + "images/2025-01-14-11-53-50_10296_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-14-11-53-50_10417_hand.jpg", + "images/2025-01-14-11-53-50_10436_hand.jpg", + "images/2025-01-14-11-53-50_10454_hand.jpg", + "images/2025-01-14-11-53-50_10473_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-14-11-53-50_10654_hand.jpg", + "images/2025-01-14-11-53-50_10668_hand.jpg", + "images/2025-01-14-11-53-50_10681_hand.jpg", + "images/2025-01-14-11-53-50_10694_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-14-11-53-50_10839_hand.jpg", + "images/2025-01-14-11-53-50_10857_hand.jpg", + "images/2025-01-14-11-53-50_10876_hand.jpg", + "images/2025-01-14-11-53-50_10894_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
No — this operation will fail" + } + ], + "images": [ + "images/2025-01-14-11-53-50_11094_hand.jpg", + "images/2025-01-14-11-53-50_11119_hand.jpg", + "images/2025-01-14-11-53-50_11143_hand.jpg", + "images/2025-01-14-11-53-50_11167_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-14-11-53-50_11406_hand.jpg", + "images/2025-01-14-11-53-50_11422_hand.jpg", + "images/2025-01-14-11-53-50_11439_hand.jpg", + "images/2025-01-14-11-53-50_11455_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-14-11-53-50_11620_hand.jpg", + "images/2025-01-14-11-53-50_11634_hand.jpg", + "images/2025-01-14-11-53-50_11648_hand.jpg", + "images/2025-01-14-11-53-50_11663_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-14-11-53-50_11817_hand.jpg", + "images/2025-01-14-11-53-50_11835_hand.jpg", + "images/2025-01-14-11-53-50_11853_hand.jpg", + "images/2025-01-14-11-53-50_11871_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-14-11-53-50_12050_hand.jpg", + "images/2025-01-14-11-53-50_12062_hand.jpg", + "images/2025-01-14-11-53-50_12074_hand.jpg", + "images/2025-01-14-11-53-50_12086_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. No — this operation will fail" + } + ], + "images": [ + "images/2025-01-14-11-53-50_12323_hand.jpg", + "images/2025-01-14-11-53-50_12403_hand.jpg", + "images/2025-01-14-11-53-50_12482_hand.jpg", + "images/2025-01-14-11-53-50_12562_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-14-11-53-50_13309_hand.jpg", + "images/2025-01-14-11-53-50_13342_hand.jpg", + "images/2025-01-14-11-53-50_13375_hand.jpg", + "images/2025-01-14-11-53-50_13408_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-14-11-53-50_13719_hand.jpg", + "images/2025-01-14-11-53-50_13733_hand.jpg", + "images/2025-01-14-11-53-50_13746_hand.jpg", + "images/2025-01-14-11-53-50_13760_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-14-11-53-50_13921_hand.jpg", + "images/2025-01-14-11-53-50_13946_hand.jpg", + "images/2025-01-14-11-53-50_13971_hand.jpg", + "images/2025-01-14-11-53-50_13996_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. No — this operation will fail\nB. Yes — this operation will succeed\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-14-11-53-50_14239_hand.jpg", + "images/2025-01-14-11-53-50_14255_hand.jpg", + "images/2025-01-14-11-53-50_14271_hand.jpg", + "images/2025-01-14-11-53-50_14287_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nBased on these early frames of the current operation, will this operation eventually succeed?\nOptions:\nA. Yes — this operation will succeed\nB. No — this operation will fail\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Yes — this operation will succeed" + } + ], + "images": [ + "images/2025-01-14-11-53-50_14474_hand.jpg", + "images/2025-01-14-11-53-50_14503_hand.jpg", + "images/2025-01-14-11-53-50_14531_hand.jpg", + "images/2025-01-14-11-53-50_14559_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T7", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/2025-01-14-11-53-50_180_hand.jpg", + "images/2025-01-14-11-53-50_517_hand.jpg", + "images/2025-01-14-11-53-50_889_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/2025-01-14-11-53-50_517_hand.jpg", + "images/2025-01-14-11-53-50_1171_hand.jpg", + "images/2025-01-14-11-53-50_889_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/2025-01-14-11-53-50_889_hand.jpg", + "images/2025-01-14-11-53-50_1405_hand.jpg", + "images/2025-01-14-11-53-50_1171_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1405_hand.jpg", + "images/2025-01-14-11-53-50_1555_hand.jpg", + "images/2025-01-14-11-53-50_1171_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1405_hand.jpg", + "images/2025-01-14-11-53-50_1555_hand.jpg", + "images/2025-01-14-11-53-50_1710_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1710_hand.jpg", + "images/2025-01-14-11-53-50_1993_hand.jpg", + "images/2025-01-14-11-53-50_1555_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1993_hand.jpg", + "images/2025-01-14-11-53-50_1710_hand.jpg", + "images/2025-01-14-11-53-50_2271_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/2025-01-14-11-53-50_2579_hand.jpg", + "images/2025-01-14-11-53-50_2271_hand.jpg", + "images/2025-01-14-11-53-50_1993_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/2025-01-14-11-53-50_2579_hand.jpg", + "images/2025-01-14-11-53-50_2907_hand.jpg", + "images/2025-01-14-11-53-50_2271_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/2025-01-14-11-53-50_2579_hand.jpg", + "images/2025-01-14-11-53-50_2907_hand.jpg", + "images/2025-01-14-11-53-50_3010_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/2025-01-14-11-53-50_3010_hand.jpg", + "images/2025-01-14-11-53-50_2907_hand.jpg", + "images/2025-01-14-11-53-50_3485_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/2025-01-14-11-53-50_3485_hand.jpg", + "images/2025-01-14-11-53-50_3010_hand.jpg", + "images/2025-01-14-11-53-50_4059_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/2025-01-14-11-53-50_4059_hand.jpg", + "images/2025-01-14-11-53-50_4502_hand.jpg", + "images/2025-01-14-11-53-50_3485_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/2025-01-14-11-53-50_4502_hand.jpg", + "images/2025-01-14-11-53-50_4059_hand.jpg", + "images/2025-01-14-11-53-50_5126_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/2025-01-14-11-53-50_5126_hand.jpg", + "images/2025-01-14-11-53-50_4502_hand.jpg", + "images/2025-01-14-11-53-50_5790_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/2025-01-14-11-53-50_5126_hand.jpg", + "images/2025-01-14-11-53-50_6159_hand.jpg", + "images/2025-01-14-11-53-50_5790_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/2025-01-14-11-53-50_5790_hand.jpg", + "images/2025-01-14-11-53-50_6354_hand.jpg", + "images/2025-01-14-11-53-50_6159_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6354_hand.jpg", + "images/2025-01-14-11-53-50_6159_hand.jpg", + "images/2025-01-14-11-53-50_6586_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6860_hand.jpg", + "images/2025-01-14-11-53-50_6586_hand.jpg", + "images/2025-01-14-11-53-50_6354_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7125_hand.jpg", + "images/2025-01-14-11-53-50_6586_hand.jpg", + "images/2025-01-14-11-53-50_6860_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7286_hand.jpg", + "images/2025-01-14-11-53-50_6860_hand.jpg", + "images/2025-01-14-11-53-50_7125_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7424_hand.jpg", + "images/2025-01-14-11-53-50_7286_hand.jpg", + "images/2025-01-14-11-53-50_7125_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7424_hand.jpg", + "images/2025-01-14-11-53-50_7286_hand.jpg", + "images/2025-01-14-11-53-50_7546_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7424_hand.jpg", + "images/2025-01-14-11-53-50_7546_hand.jpg", + "images/2025-01-14-11-53-50_7666_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7666_hand.jpg", + "images/2025-01-14-11-53-50_7546_hand.jpg", + "images/2025-01-14-11-53-50_7875_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/2025-01-14-11-53-50_8097_hand.jpg", + "images/2025-01-14-11-53-50_7875_hand.jpg", + "images/2025-01-14-11-53-50_7666_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/2025-01-14-11-53-50_8251_hand.jpg", + "images/2025-01-14-11-53-50_8097_hand.jpg", + "images/2025-01-14-11-53-50_7875_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/2025-01-14-11-53-50_8097_hand.jpg", + "images/2025-01-14-11-53-50_8251_hand.jpg", + "images/2025-01-14-11-53-50_8398_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/2025-01-14-11-53-50_8834_hand.jpg", + "images/2025-01-14-11-53-50_8251_hand.jpg", + "images/2025-01-14-11-53-50_8398_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/2025-01-14-11-53-50_8834_hand.jpg", + "images/2025-01-14-11-53-50_9342_hand.jpg", + "images/2025-01-14-11-53-50_8398_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/2025-01-14-11-53-50_8834_hand.jpg", + "images/2025-01-14-11-53-50_9558_hand.jpg", + "images/2025-01-14-11-53-50_9342_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/2025-01-14-11-53-50_9720_hand.jpg", + "images/2025-01-14-11-53-50_9558_hand.jpg", + "images/2025-01-14-11-53-50_9342_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/2025-01-14-11-53-50_9720_hand.jpg", + "images/2025-01-14-11-53-50_9558_hand.jpg", + "images/2025-01-14-11-53-50_10317_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/2025-01-14-11-53-50_10509_hand.jpg", + "images/2025-01-14-11-53-50_9720_hand.jpg", + "images/2025-01-14-11-53-50_10317_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/2025-01-14-11-53-50_10317_hand.jpg", + "images/2025-01-14-11-53-50_10720_hand.jpg", + "images/2025-01-14-11-53-50_10509_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/2025-01-14-11-53-50_10509_hand.jpg", + "images/2025-01-14-11-53-50_10930_hand.jpg", + "images/2025-01-14-11-53-50_10720_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/2025-01-14-11-53-50_10930_hand.jpg", + "images/2025-01-14-11-53-50_11487_hand.jpg", + "images/2025-01-14-11-53-50_10720_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/2025-01-14-11-53-50_11691_hand.jpg", + "images/2025-01-14-11-53-50_11487_hand.jpg", + "images/2025-01-14-11-53-50_10930_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/2025-01-14-11-53-50_11907_hand.jpg", + "images/2025-01-14-11-53-50_11487_hand.jpg", + "images/2025-01-14-11-53-50_11691_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/2025-01-14-11-53-50_11907_hand.jpg", + "images/2025-01-14-11-53-50_12110_hand.jpg", + "images/2025-01-14-11-53-50_11691_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/2025-01-14-11-53-50_12110_hand.jpg", + "images/2025-01-14-11-53-50_13475_hand.jpg", + "images/2025-01-14-11-53-50_11907_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/2025-01-14-11-53-50_13475_hand.jpg", + "images/2025-01-14-11-53-50_12110_hand.jpg", + "images/2025-01-14-11-53-50_13787_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/2025-01-14-11-53-50_13475_hand.jpg", + "images/2025-01-14-11-53-50_13787_hand.jpg", + "images/2025-01-14-11-53-50_14046_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/2025-01-14-11-53-50_14319_hand.jpg", + "images/2025-01-14-11-53-50_13787_hand.jpg", + "images/2025-01-14-11-53-50_14046_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/2025-01-14-11-53-50_14319_hand.jpg", + "images/2025-01-14-11-53-50_14046_hand.jpg", + "images/2025-01-14-11-53-50_14615_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_temporal", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_180_hand.jpg", + "images/2025-01-14-11-53-50_517_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_517_hand.jpg", + "images/2025-01-14-11-53-50_889_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_889_hand.jpg", + "images/2025-01-14-11-53-50_1171_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1171_hand.jpg", + "images/2025-01-14-11-53-50_1405_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1405_hand.jpg", + "images/2025-01-14-11-53-50_1555_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1555_hand.jpg", + "images/2025-01-14-11-53-50_1710_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1710_hand.jpg", + "images/2025-01-14-11-53-50_1993_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1993_hand.jpg", + "images/2025-01-14-11-53-50_2271_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_2579_hand.jpg", + "images/2025-01-14-11-53-50_2271_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_2907_hand.jpg", + "images/2025-01-14-11-53-50_2579_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_3010_hand.jpg", + "images/2025-01-14-11-53-50_2907_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_3485_hand.jpg", + "images/2025-01-14-11-53-50_3010_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_4059_hand.jpg", + "images/2025-01-14-11-53-50_3485_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_4502_hand.jpg", + "images/2025-01-14-11-53-50_4059_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_5126_hand.jpg", + "images/2025-01-14-11-53-50_4502_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_5126_hand.jpg", + "images/2025-01-14-11-53-50_5790_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_5790_hand.jpg", + "images/2025-01-14-11-53-50_6159_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6159_hand.jpg", + "images/2025-01-14-11-53-50_6354_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6586_hand.jpg", + "images/2025-01-14-11-53-50_6354_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6860_hand.jpg", + "images/2025-01-14-11-53-50_6586_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7125_hand.jpg", + "images/2025-01-14-11-53-50_6860_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7286_hand.jpg", + "images/2025-01-14-11-53-50_7125_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7286_hand.jpg", + "images/2025-01-14-11-53-50_7424_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7424_hand.jpg", + "images/2025-01-14-11-53-50_7546_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7546_hand.jpg", + "images/2025-01-14-11-53-50_7666_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7875_hand.jpg", + "images/2025-01-14-11-53-50_7666_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_8097_hand.jpg", + "images/2025-01-14-11-53-50_7875_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_8251_hand.jpg", + "images/2025-01-14-11-53-50_8097_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_8398_hand.jpg", + "images/2025-01-14-11-53-50_8251_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_8834_hand.jpg", + "images/2025-01-14-11-53-50_8398_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_9342_hand.jpg", + "images/2025-01-14-11-53-50_8834_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_9558_hand.jpg", + "images/2025-01-14-11-53-50_9342_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_9558_hand.jpg", + "images/2025-01-14-11-53-50_9720_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_10317_hand.jpg", + "images/2025-01-14-11-53-50_9720_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_10317_hand.jpg", + "images/2025-01-14-11-53-50_10509_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_10720_hand.jpg", + "images/2025-01-14-11-53-50_10509_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_10930_hand.jpg", + "images/2025-01-14-11-53-50_10720_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_10930_hand.jpg", + "images/2025-01-14-11-53-50_11487_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_11691_hand.jpg", + "images/2025-01-14-11-53-50_11487_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_11907_hand.jpg", + "images/2025-01-14-11-53-50_11691_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_12110_hand.jpg", + "images/2025-01-14-11-53-50_11907_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_12110_hand.jpg", + "images/2025-01-14-11-53-50_13475_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_13475_hand.jpg", + "images/2025-01-14-11-53-50_13787_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_14046_hand.jpg", + "images/2025-01-14-11-53-50_13787_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_14046_hand.jpg", + "images/2025-01-14-11-53-50_14319_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_14319_hand.jpg", + "images/2025-01-14-11-53-50_14615_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_889_hand.jpg", + "images/2025-01-14-11-53-50_180_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1171_hand.jpg", + "images/2025-01-14-11-53-50_517_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_889_hand.jpg", + "images/2025-01-14-11-53-50_1405_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1555_hand.jpg", + "images/2025-01-14-11-53-50_1171_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1710_hand.jpg", + "images/2025-01-14-11-53-50_1405_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1993_hand.jpg", + "images/2025-01-14-11-53-50_1555_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1710_hand.jpg", + "images/2025-01-14-11-53-50_2271_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1993_hand.jpg", + "images/2025-01-14-11-53-50_2579_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_2271_hand.jpg", + "images/2025-01-14-11-53-50_2907_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_2579_hand.jpg", + "images/2025-01-14-11-53-50_3010_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_3485_hand.jpg", + "images/2025-01-14-11-53-50_2907_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_4059_hand.jpg", + "images/2025-01-14-11-53-50_3010_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_4502_hand.jpg", + "images/2025-01-14-11-53-50_3485_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_5126_hand.jpg", + "images/2025-01-14-11-53-50_4059_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_5790_hand.jpg", + "images/2025-01-14-11-53-50_4502_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6159_hand.jpg", + "images/2025-01-14-11-53-50_5126_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6354_hand.jpg", + "images/2025-01-14-11-53-50_5790_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6159_hand.jpg", + "images/2025-01-14-11-53-50_6586_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6354_hand.jpg", + "images/2025-01-14-11-53-50_6860_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6586_hand.jpg", + "images/2025-01-14-11-53-50_7125_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7286_hand.jpg", + "images/2025-01-14-11-53-50_6860_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7125_hand.jpg", + "images/2025-01-14-11-53-50_7424_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7546_hand.jpg", + "images/2025-01-14-11-53-50_7286_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7666_hand.jpg", + "images/2025-01-14-11-53-50_7424_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7546_hand.jpg", + "images/2025-01-14-11-53-50_7875_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7666_hand.jpg", + "images/2025-01-14-11-53-50_8097_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7875_hand.jpg", + "images/2025-01-14-11-53-50_8251_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_8398_hand.jpg", + "images/2025-01-14-11-53-50_8097_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_8834_hand.jpg", + "images/2025-01-14-11-53-50_8251_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_9342_hand.jpg", + "images/2025-01-14-11-53-50_8398_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_9558_hand.jpg", + "images/2025-01-14-11-53-50_8834_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_9342_hand.jpg", + "images/2025-01-14-11-53-50_9720_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_9558_hand.jpg", + "images/2025-01-14-11-53-50_10317_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_10509_hand.jpg", + "images/2025-01-14-11-53-50_9720_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_10720_hand.jpg", + "images/2025-01-14-11-53-50_10317_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_10509_hand.jpg", + "images/2025-01-14-11-53-50_10930_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_10720_hand.jpg", + "images/2025-01-14-11-53-50_11487_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_10930_hand.jpg", + "images/2025-01-14-11-53-50_11691_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_11487_hand.jpg", + "images/2025-01-14-11-53-50_11907_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_12110_hand.jpg", + "images/2025-01-14-11-53-50_11691_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. 
Image X happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_11907_hand.jpg", + "images/2025-01-14-11-53-50_13475_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_12110_hand.jpg", + "images/2025-01-14-11-53-50_13787_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_14046_hand.jpg", + "images/2025-01-14-11-53-50_13475_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "X. Image X happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_14319_hand.jpg", + "images/2025-01-14-11-53-50_13787_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "X" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nA single comparison image shows two labeled panels from the same robot manipulation recording. The labels X and Y are arbitrary identifiers and do not indicate temporal order. Which labeled panel happened earlier in the real manipulation sequence?\nOptions:\nX. Image X happened earlier\nY. Image Y happened earlier\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "Y. 
Image Y happened earlier" + } + ], + "images": [ + "images/2025-01-14-11-53-50_14046_hand.jpg", + "images/2025-01-14-11-53-50_14615_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_binary", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "Y" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_142_hand.jpg", + "images/2025-01-14-11-53-50_145_hand.jpg", + "images/2025-01-14-11-53-50_148_hand.jpg", + "images/2025-01-14-11-53-50_151_hand.jpg", + "images/2025-01-14-11-53-50_154_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_174_hand.jpg", + "images/2025-01-14-11-53-50_177_hand.jpg", + "images/2025-01-14-11-53-50_180_hand.jpg", + "images/2025-01-14-11-53-50_183_hand.jpg", + "images/2025-01-14-11-53-50_186_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_207_hand.jpg", + "images/2025-01-14-11-53-50_210_hand.jpg", + "images/2025-01-14-11-53-50_213_hand.jpg", + "images/2025-01-14-11-53-50_216_hand.jpg", + "images/2025-01-14-11-53-50_219_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_426_hand.jpg", + "images/2025-01-14-11-53-50_429_hand.jpg", + "images/2025-01-14-11-53-50_432_hand.jpg", + "images/2025-01-14-11-53-50_435_hand.jpg", + "images/2025-01-14-11-53-50_438_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_512_hand.jpg", + "images/2025-01-14-11-53-50_515_hand.jpg", + "images/2025-01-14-11-53-50_518_hand.jpg", + "images/2025-01-14-11-53-50_521_hand.jpg", + "images/2025-01-14-11-53-50_524_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_597_hand.jpg", + "images/2025-01-14-11-53-50_600_hand.jpg", + "images/2025-01-14-11-53-50_603_hand.jpg", + "images/2025-01-14-11-53-50_606_hand.jpg", + "images/2025-01-14-11-53-50_609_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_839_hand.jpg", + "images/2025-01-14-11-53-50_842_hand.jpg", + "images/2025-01-14-11-53-50_845_hand.jpg", + "images/2025-01-14-11-53-50_848_hand.jpg", + "images/2025-01-14-11-53-50_851_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_884_hand.jpg", + "images/2025-01-14-11-53-50_887_hand.jpg", + "images/2025-01-14-11-53-50_890_hand.jpg", + "images/2025-01-14-11-53-50_893_hand.jpg", + "images/2025-01-14-11-53-50_896_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_928_hand.jpg", + "images/2025-01-14-11-53-50_931_hand.jpg", + "images/2025-01-14-11-53-50_934_hand.jpg", + "images/2025-01-14-11-53-50_937_hand.jpg", + "images/2025-01-14-11-53-50_940_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1113_hand.jpg", + "images/2025-01-14-11-53-50_1116_hand.jpg", + "images/2025-01-14-11-53-50_1119_hand.jpg", + "images/2025-01-14-11-53-50_1122_hand.jpg", + "images/2025-01-14-11-53-50_1125_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1166_hand.jpg", + "images/2025-01-14-11-53-50_1169_hand.jpg", + "images/2025-01-14-11-53-50_1172_hand.jpg", + "images/2025-01-14-11-53-50_1175_hand.jpg", + "images/2025-01-14-11-53-50_1178_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1218_hand.jpg", + "images/2025-01-14-11-53-50_1221_hand.jpg", + "images/2025-01-14-11-53-50_1224_hand.jpg", + "images/2025-01-14-11-53-50_1227_hand.jpg", + "images/2025-01-14-11-53-50_1230_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1371_hand.jpg", + "images/2025-01-14-11-53-50_1374_hand.jpg", + "images/2025-01-14-11-53-50_1377_hand.jpg", + "images/2025-01-14-11-53-50_1380_hand.jpg", + "images/2025-01-14-11-53-50_1383_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1400_hand.jpg", + "images/2025-01-14-11-53-50_1403_hand.jpg", + "images/2025-01-14-11-53-50_1406_hand.jpg", + "images/2025-01-14-11-53-50_1409_hand.jpg", + "images/2025-01-14-11-53-50_1412_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1428_hand.jpg", + "images/2025-01-14-11-53-50_1431_hand.jpg", + "images/2025-01-14-11-53-50_1434_hand.jpg", + "images/2025-01-14-11-53-50_1437_hand.jpg", + "images/2025-01-14-11-53-50_1440_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1526_hand.jpg", + "images/2025-01-14-11-53-50_1529_hand.jpg", + "images/2025-01-14-11-53-50_1532_hand.jpg", + "images/2025-01-14-11-53-50_1535_hand.jpg", + "images/2025-01-14-11-53-50_1538_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1550_hand.jpg", + "images/2025-01-14-11-53-50_1553_hand.jpg", + "images/2025-01-14-11-53-50_1556_hand.jpg", + "images/2025-01-14-11-53-50_1559_hand.jpg", + "images/2025-01-14-11-53-50_1562_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1573_hand.jpg", + "images/2025-01-14-11-53-50_1576_hand.jpg", + "images/2025-01-14-11-53-50_1579_hand.jpg", + "images/2025-01-14-11-53-50_1582_hand.jpg", + "images/2025-01-14-11-53-50_1585_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1674_hand.jpg", + "images/2025-01-14-11-53-50_1677_hand.jpg", + "images/2025-01-14-11-53-50_1680_hand.jpg", + "images/2025-01-14-11-53-50_1683_hand.jpg", + "images/2025-01-14-11-53-50_1686_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1704_hand.jpg", + "images/2025-01-14-11-53-50_1707_hand.jpg", + "images/2025-01-14-11-53-50_1710_hand.jpg", + "images/2025-01-14-11-53-50_1713_hand.jpg", + "images/2025-01-14-11-53-50_1716_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1734_hand.jpg", + "images/2025-01-14-11-53-50_1737_hand.jpg", + "images/2025-01-14-11-53-50_1740_hand.jpg", + "images/2025-01-14-11-53-50_1743_hand.jpg", + "images/2025-01-14-11-53-50_1746_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1918_hand.jpg", + "images/2025-01-14-11-53-50_1921_hand.jpg", + "images/2025-01-14-11-53-50_1924_hand.jpg", + "images/2025-01-14-11-53-50_1927_hand.jpg", + "images/2025-01-14-11-53-50_1930_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1988_hand.jpg", + "images/2025-01-14-11-53-50_1991_hand.jpg", + "images/2025-01-14-11-53-50_1994_hand.jpg", + "images/2025-01-14-11-53-50_1997_hand.jpg", + "images/2025-01-14-11-53-50_2000_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_2057_hand.jpg", + "images/2025-01-14-11-53-50_2060_hand.jpg", + "images/2025-01-14-11-53-50_2063_hand.jpg", + "images/2025-01-14-11-53-50_2066_hand.jpg", + "images/2025-01-14-11-53-50_2069_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_2238_hand.jpg", + "images/2025-01-14-11-53-50_2241_hand.jpg", + "images/2025-01-14-11-53-50_2244_hand.jpg", + "images/2025-01-14-11-53-50_2247_hand.jpg", + "images/2025-01-14-11-53-50_2250_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_2266_hand.jpg", + "images/2025-01-14-11-53-50_2269_hand.jpg", + "images/2025-01-14-11-53-50_2272_hand.jpg", + "images/2025-01-14-11-53-50_2275_hand.jpg", + "images/2025-01-14-11-53-50_2278_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_2293_hand.jpg", + "images/2025-01-14-11-53-50_2296_hand.jpg", + "images/2025-01-14-11-53-50_2299_hand.jpg", + "images/2025-01-14-11-53-50_2302_hand.jpg", + "images/2025-01-14-11-53-50_2305_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_2494_hand.jpg", + "images/2025-01-14-11-53-50_2497_hand.jpg", + "images/2025-01-14-11-53-50_2500_hand.jpg", + "images/2025-01-14-11-53-50_2503_hand.jpg", + "images/2025-01-14-11-53-50_2506_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_2574_hand.jpg", + "images/2025-01-14-11-53-50_2577_hand.jpg", + "images/2025-01-14-11-53-50_2580_hand.jpg", + "images/2025-01-14-11-53-50_2583_hand.jpg", + "images/2025-01-14-11-53-50_2586_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_2653_hand.jpg", + "images/2025-01-14-11-53-50_2656_hand.jpg", + "images/2025-01-14-11-53-50_2659_hand.jpg", + "images/2025-01-14-11-53-50_2662_hand.jpg", + "images/2025-01-14-11-53-50_2665_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_2867_hand.jpg", + "images/2025-01-14-11-53-50_2870_hand.jpg", + "images/2025-01-14-11-53-50_2873_hand.jpg", + "images/2025-01-14-11-53-50_2876_hand.jpg", + "images/2025-01-14-11-53-50_2879_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_2902_hand.jpg", + "images/2025-01-14-11-53-50_2905_hand.jpg", + "images/2025-01-14-11-53-50_2908_hand.jpg", + "images/2025-01-14-11-53-50_2911_hand.jpg", + "images/2025-01-14-11-53-50_2914_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_2936_hand.jpg", + "images/2025-01-14-11-53-50_2939_hand.jpg", + "images/2025-01-14-11-53-50_2942_hand.jpg", + "images/2025-01-14-11-53-50_2945_hand.jpg", + "images/2025-01-14-11-53-50_2948_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_3314_hand.jpg", + "images/2025-01-14-11-53-50_3317_hand.jpg", + "images/2025-01-14-11-53-50_3320_hand.jpg", + "images/2025-01-14-11-53-50_3323_hand.jpg", + "images/2025-01-14-11-53-50_3326_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_3479_hand.jpg", + "images/2025-01-14-11-53-50_3482_hand.jpg", + "images/2025-01-14-11-53-50_3485_hand.jpg", + "images/2025-01-14-11-53-50_3488_hand.jpg", + "images/2025-01-14-11-53-50_3491_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_3644_hand.jpg", + "images/2025-01-14-11-53-50_3647_hand.jpg", + "images/2025-01-14-11-53-50_3650_hand.jpg", + "images/2025-01-14-11-53-50_3653_hand.jpg", + "images/2025-01-14-11-53-50_3656_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_4018_hand.jpg", + "images/2025-01-14-11-53-50_4021_hand.jpg", + "images/2025-01-14-11-53-50_4024_hand.jpg", + "images/2025-01-14-11-53-50_4027_hand.jpg", + "images/2025-01-14-11-53-50_4030_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_4053_hand.jpg", + "images/2025-01-14-11-53-50_4056_hand.jpg", + "images/2025-01-14-11-53-50_4059_hand.jpg", + "images/2025-01-14-11-53-50_4062_hand.jpg", + "images/2025-01-14-11-53-50_4065_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_4088_hand.jpg", + "images/2025-01-14-11-53-50_4091_hand.jpg", + "images/2025-01-14-11-53-50_4094_hand.jpg", + "images/2025-01-14-11-53-50_4097_hand.jpg", + "images/2025-01-14-11-53-50_4100_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_4377_hand.jpg", + "images/2025-01-14-11-53-50_4380_hand.jpg", + "images/2025-01-14-11-53-50_4383_hand.jpg", + "images/2025-01-14-11-53-50_4386_hand.jpg", + "images/2025-01-14-11-53-50_4389_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_4496_hand.jpg", + "images/2025-01-14-11-53-50_4499_hand.jpg", + "images/2025-01-14-11-53-50_4502_hand.jpg", + "images/2025-01-14-11-53-50_4505_hand.jpg", + "images/2025-01-14-11-53-50_4508_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_4616_hand.jpg", + "images/2025-01-14-11-53-50_4619_hand.jpg", + "images/2025-01-14-11-53-50_4622_hand.jpg", + "images/2025-01-14-11-53-50_4625_hand.jpg", + "images/2025-01-14-11-53-50_4628_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_5097_hand.jpg", + "images/2025-01-14-11-53-50_5100_hand.jpg", + "images/2025-01-14-11-53-50_5103_hand.jpg", + "images/2025-01-14-11-53-50_5106_hand.jpg", + "images/2025-01-14-11-53-50_5109_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_5120_hand.jpg", + "images/2025-01-14-11-53-50_5123_hand.jpg", + "images/2025-01-14-11-53-50_5126_hand.jpg", + "images/2025-01-14-11-53-50_5129_hand.jpg", + "images/2025-01-14-11-53-50_5132_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_5144_hand.jpg", + "images/2025-01-14-11-53-50_5147_hand.jpg", + "images/2025-01-14-11-53-50_5150_hand.jpg", + "images/2025-01-14-11-53-50_5153_hand.jpg", + "images/2025-01-14-11-53-50_5156_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_5738_hand.jpg", + "images/2025-01-14-11-53-50_5741_hand.jpg", + "images/2025-01-14-11-53-50_5744_hand.jpg", + "images/2025-01-14-11-53-50_5747_hand.jpg", + "images/2025-01-14-11-53-50_5750_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_5784_hand.jpg", + "images/2025-01-14-11-53-50_5787_hand.jpg", + "images/2025-01-14-11-53-50_5790_hand.jpg", + "images/2025-01-14-11-53-50_5793_hand.jpg", + "images/2025-01-14-11-53-50_5796_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_5831_hand.jpg", + "images/2025-01-14-11-53-50_5834_hand.jpg", + "images/2025-01-14-11-53-50_5837_hand.jpg", + "images/2025-01-14-11-53-50_5840_hand.jpg", + "images/2025-01-14-11-53-50_5843_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6130_hand.jpg", + "images/2025-01-14-11-53-50_6133_hand.jpg", + "images/2025-01-14-11-53-50_6136_hand.jpg", + "images/2025-01-14-11-53-50_6139_hand.jpg", + "images/2025-01-14-11-53-50_6142_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6153_hand.jpg", + "images/2025-01-14-11-53-50_6156_hand.jpg", + "images/2025-01-14-11-53-50_6159_hand.jpg", + "images/2025-01-14-11-53-50_6162_hand.jpg", + "images/2025-01-14-11-53-50_6165_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6176_hand.jpg", + "images/2025-01-14-11-53-50_6179_hand.jpg", + "images/2025-01-14-11-53-50_6182_hand.jpg", + "images/2025-01-14-11-53-50_6185_hand.jpg", + "images/2025-01-14-11-53-50_6188_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6304_hand.jpg", + "images/2025-01-14-11-53-50_6307_hand.jpg", + "images/2025-01-14-11-53-50_6310_hand.jpg", + "images/2025-01-14-11-53-50_6313_hand.jpg", + "images/2025-01-14-11-53-50_6316_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6348_hand.jpg", + "images/2025-01-14-11-53-50_6351_hand.jpg", + "images/2025-01-14-11-53-50_6354_hand.jpg", + "images/2025-01-14-11-53-50_6357_hand.jpg", + "images/2025-01-14-11-53-50_6360_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6392_hand.jpg", + "images/2025-01-14-11-53-50_6395_hand.jpg", + "images/2025-01-14-11-53-50_6398_hand.jpg", + "images/2025-01-14-11-53-50_6401_hand.jpg", + "images/2025-01-14-11-53-50_6404_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6544_hand.jpg", + "images/2025-01-14-11-53-50_6547_hand.jpg", + "images/2025-01-14-11-53-50_6550_hand.jpg", + "images/2025-01-14-11-53-50_6553_hand.jpg", + "images/2025-01-14-11-53-50_6556_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6580_hand.jpg", + "images/2025-01-14-11-53-50_6583_hand.jpg", + "images/2025-01-14-11-53-50_6586_hand.jpg", + "images/2025-01-14-11-53-50_6589_hand.jpg", + "images/2025-01-14-11-53-50_6592_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6616_hand.jpg", + "images/2025-01-14-11-53-50_6619_hand.jpg", + "images/2025-01-14-11-53-50_6622_hand.jpg", + "images/2025-01-14-11-53-50_6625_hand.jpg", + "images/2025-01-14-11-53-50_6628_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6795_hand.jpg", + "images/2025-01-14-11-53-50_6798_hand.jpg", + "images/2025-01-14-11-53-50_6801_hand.jpg", + "images/2025-01-14-11-53-50_6804_hand.jpg", + "images/2025-01-14-11-53-50_6807_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6854_hand.jpg", + "images/2025-01-14-11-53-50_6857_hand.jpg", + "images/2025-01-14-11-53-50_6860_hand.jpg", + "images/2025-01-14-11-53-50_6863_hand.jpg", + "images/2025-01-14-11-53-50_6866_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6913_hand.jpg", + "images/2025-01-14-11-53-50_6916_hand.jpg", + "images/2025-01-14-11-53-50_6919_hand.jpg", + "images/2025-01-14-11-53-50_6922_hand.jpg", + "images/2025-01-14-11-53-50_6925_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7086_hand.jpg", + "images/2025-01-14-11-53-50_7089_hand.jpg", + "images/2025-01-14-11-53-50_7092_hand.jpg", + "images/2025-01-14-11-53-50_7095_hand.jpg", + "images/2025-01-14-11-53-50_7098_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7119_hand.jpg", + "images/2025-01-14-11-53-50_7122_hand.jpg", + "images/2025-01-14-11-53-50_7125_hand.jpg", + "images/2025-01-14-11-53-50_7128_hand.jpg", + "images/2025-01-14-11-53-50_7131_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7152_hand.jpg", + "images/2025-01-14-11-53-50_7155_hand.jpg", + "images/2025-01-14-11-53-50_7158_hand.jpg", + "images/2025-01-14-11-53-50_7161_hand.jpg", + "images/2025-01-14-11-53-50_7164_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7258_hand.jpg", + "images/2025-01-14-11-53-50_7261_hand.jpg", + "images/2025-01-14-11-53-50_7264_hand.jpg", + "images/2025-01-14-11-53-50_7267_hand.jpg", + "images/2025-01-14-11-53-50_7270_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7280_hand.jpg", + "images/2025-01-14-11-53-50_7283_hand.jpg", + "images/2025-01-14-11-53-50_7286_hand.jpg", + "images/2025-01-14-11-53-50_7289_hand.jpg", + "images/2025-01-14-11-53-50_7292_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7303_hand.jpg", + "images/2025-01-14-11-53-50_7306_hand.jpg", + "images/2025-01-14-11-53-50_7309_hand.jpg", + "images/2025-01-14-11-53-50_7312_hand.jpg", + "images/2025-01-14-11-53-50_7315_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7394_hand.jpg", + "images/2025-01-14-11-53-50_7397_hand.jpg", + "images/2025-01-14-11-53-50_7400_hand.jpg", + "images/2025-01-14-11-53-50_7403_hand.jpg", + "images/2025-01-14-11-53-50_7406_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7418_hand.jpg", + "images/2025-01-14-11-53-50_7421_hand.jpg", + "images/2025-01-14-11-53-50_7424_hand.jpg", + "images/2025-01-14-11-53-50_7427_hand.jpg", + "images/2025-01-14-11-53-50_7430_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7442_hand.jpg", + "images/2025-01-14-11-53-50_7445_hand.jpg", + "images/2025-01-14-11-53-50_7448_hand.jpg", + "images/2025-01-14-11-53-50_7451_hand.jpg", + "images/2025-01-14-11-53-50_7454_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7523_hand.jpg", + "images/2025-01-14-11-53-50_7526_hand.jpg", + "images/2025-01-14-11-53-50_7529_hand.jpg", + "images/2025-01-14-11-53-50_7532_hand.jpg", + "images/2025-01-14-11-53-50_7535_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7540_hand.jpg", + "images/2025-01-14-11-53-50_7543_hand.jpg", + "images/2025-01-14-11-53-50_7546_hand.jpg", + "images/2025-01-14-11-53-50_7549_hand.jpg", + "images/2025-01-14-11-53-50_7552_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7558_hand.jpg", + "images/2025-01-14-11-53-50_7561_hand.jpg", + "images/2025-01-14-11-53-50_7564_hand.jpg", + "images/2025-01-14-11-53-50_7567_hand.jpg", + "images/2025-01-14-11-53-50_7570_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7637_hand.jpg", + "images/2025-01-14-11-53-50_7640_hand.jpg", + "images/2025-01-14-11-53-50_7643_hand.jpg", + "images/2025-01-14-11-53-50_7646_hand.jpg", + "images/2025-01-14-11-53-50_7649_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7660_hand.jpg", + "images/2025-01-14-11-53-50_7663_hand.jpg", + "images/2025-01-14-11-53-50_7666_hand.jpg", + "images/2025-01-14-11-53-50_7669_hand.jpg", + "images/2025-01-14-11-53-50_7672_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7684_hand.jpg", + "images/2025-01-14-11-53-50_7687_hand.jpg", + "images/2025-01-14-11-53-50_7690_hand.jpg", + "images/2025-01-14-11-53-50_7693_hand.jpg", + "images/2025-01-14-11-53-50_7696_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7820_hand.jpg", + "images/2025-01-14-11-53-50_7823_hand.jpg", + "images/2025-01-14-11-53-50_7826_hand.jpg", + "images/2025-01-14-11-53-50_7829_hand.jpg", + "images/2025-01-14-11-53-50_7832_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7869_hand.jpg", + "images/2025-01-14-11-53-50_7872_hand.jpg", + "images/2025-01-14-11-53-50_7875_hand.jpg", + "images/2025-01-14-11-53-50_7878_hand.jpg", + "images/2025-01-14-11-53-50_7881_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7918_hand.jpg", + "images/2025-01-14-11-53-50_7921_hand.jpg", + "images/2025-01-14-11-53-50_7924_hand.jpg", + "images/2025-01-14-11-53-50_7927_hand.jpg", + "images/2025-01-14-11-53-50_7930_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_8063_hand.jpg", + "images/2025-01-14-11-53-50_8066_hand.jpg", + "images/2025-01-14-11-53-50_8069_hand.jpg", + "images/2025-01-14-11-53-50_8072_hand.jpg", + "images/2025-01-14-11-53-50_8075_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_8091_hand.jpg", + "images/2025-01-14-11-53-50_8094_hand.jpg", + "images/2025-01-14-11-53-50_8097_hand.jpg", + "images/2025-01-14-11-53-50_8100_hand.jpg", + "images/2025-01-14-11-53-50_8103_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_8119_hand.jpg", + "images/2025-01-14-11-53-50_8122_hand.jpg", + "images/2025-01-14-11-53-50_8125_hand.jpg", + "images/2025-01-14-11-53-50_8128_hand.jpg", + "images/2025-01-14-11-53-50_8131_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_8220_hand.jpg", + "images/2025-01-14-11-53-50_8223_hand.jpg", + "images/2025-01-14-11-53-50_8226_hand.jpg", + "images/2025-01-14-11-53-50_8229_hand.jpg", + "images/2025-01-14-11-53-50_8232_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_8245_hand.jpg", + "images/2025-01-14-11-53-50_8248_hand.jpg", + "images/2025-01-14-11-53-50_8251_hand.jpg", + "images/2025-01-14-11-53-50_8254_hand.jpg", + "images/2025-01-14-11-53-50_8257_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_8270_hand.jpg", + "images/2025-01-14-11-53-50_8273_hand.jpg", + "images/2025-01-14-11-53-50_8276_hand.jpg", + "images/2025-01-14-11-53-50_8279_hand.jpg", + "images/2025-01-14-11-53-50_8282_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_8366_hand.jpg", + "images/2025-01-14-11-53-50_8369_hand.jpg", + "images/2025-01-14-11-53-50_8372_hand.jpg", + "images/2025-01-14-11-53-50_8375_hand.jpg", + "images/2025-01-14-11-53-50_8378_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_8392_hand.jpg", + "images/2025-01-14-11-53-50_8395_hand.jpg", + "images/2025-01-14-11-53-50_8398_hand.jpg", + "images/2025-01-14-11-53-50_8401_hand.jpg", + "images/2025-01-14-11-53-50_8404_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_8419_hand.jpg", + "images/2025-01-14-11-53-50_8422_hand.jpg", + "images/2025-01-14-11-53-50_8425_hand.jpg", + "images/2025-01-14-11-53-50_8428_hand.jpg", + "images/2025-01-14-11-53-50_8431_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_8702_hand.jpg", + "images/2025-01-14-11-53-50_8705_hand.jpg", + "images/2025-01-14-11-53-50_8708_hand.jpg", + "images/2025-01-14-11-53-50_8711_hand.jpg", + "images/2025-01-14-11-53-50_8714_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_8828_hand.jpg", + "images/2025-01-14-11-53-50_8831_hand.jpg", + "images/2025-01-14-11-53-50_8834_hand.jpg", + "images/2025-01-14-11-53-50_8837_hand.jpg", + "images/2025-01-14-11-53-50_8840_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_8955_hand.jpg", + "images/2025-01-14-11-53-50_8958_hand.jpg", + "images/2025-01-14-11-53-50_8961_hand.jpg", + "images/2025-01-14-11-53-50_8964_hand.jpg", + "images/2025-01-14-11-53-50_8967_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_9285_hand.jpg", + "images/2025-01-14-11-53-50_9288_hand.jpg", + "images/2025-01-14-11-53-50_9291_hand.jpg", + "images/2025-01-14-11-53-50_9294_hand.jpg", + "images/2025-01-14-11-53-50_9297_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_9336_hand.jpg", + "images/2025-01-14-11-53-50_9339_hand.jpg", + "images/2025-01-14-11-53-50_9342_hand.jpg", + "images/2025-01-14-11-53-50_9345_hand.jpg", + "images/2025-01-14-11-53-50_9348_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_9388_hand.jpg", + "images/2025-01-14-11-53-50_9391_hand.jpg", + "images/2025-01-14-11-53-50_9394_hand.jpg", + "images/2025-01-14-11-53-50_9397_hand.jpg", + "images/2025-01-14-11-53-50_9400_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_9528_hand.jpg", + "images/2025-01-14-11-53-50_9531_hand.jpg", + "images/2025-01-14-11-53-50_9534_hand.jpg", + "images/2025-01-14-11-53-50_9537_hand.jpg", + "images/2025-01-14-11-53-50_9540_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_9552_hand.jpg", + "images/2025-01-14-11-53-50_9555_hand.jpg", + "images/2025-01-14-11-53-50_9558_hand.jpg", + "images/2025-01-14-11-53-50_9561_hand.jpg", + "images/2025-01-14-11-53-50_9564_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_9576_hand.jpg", + "images/2025-01-14-11-53-50_9579_hand.jpg", + "images/2025-01-14-11-53-50_9582_hand.jpg", + "images/2025-01-14-11-53-50_9585_hand.jpg", + "images/2025-01-14-11-53-50_9588_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_9682_hand.jpg", + "images/2025-01-14-11-53-50_9685_hand.jpg", + "images/2025-01-14-11-53-50_9688_hand.jpg", + "images/2025-01-14-11-53-50_9691_hand.jpg", + "images/2025-01-14-11-53-50_9694_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_9714_hand.jpg", + "images/2025-01-14-11-53-50_9717_hand.jpg", + "images/2025-01-14-11-53-50_9720_hand.jpg", + "images/2025-01-14-11-53-50_9723_hand.jpg", + "images/2025-01-14-11-53-50_9726_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_9746_hand.jpg", + "images/2025-01-14-11-53-50_9749_hand.jpg", + "images/2025-01-14-11-53-50_9752_hand.jpg", + "images/2025-01-14-11-53-50_9755_hand.jpg", + "images/2025-01-14-11-53-50_9758_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_10287_hand.jpg", + "images/2025-01-14-11-53-50_10290_hand.jpg", + "images/2025-01-14-11-53-50_10293_hand.jpg", + "images/2025-01-14-11-53-50_10296_hand.jpg", + "images/2025-01-14-11-53-50_10299_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_10311_hand.jpg", + "images/2025-01-14-11-53-50_10314_hand.jpg", + "images/2025-01-14-11-53-50_10317_hand.jpg", + "images/2025-01-14-11-53-50_10320_hand.jpg", + "images/2025-01-14-11-53-50_10323_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_10335_hand.jpg", + "images/2025-01-14-11-53-50_10338_hand.jpg", + "images/2025-01-14-11-53-50_10341_hand.jpg", + "images/2025-01-14-11-53-50_10344_hand.jpg", + "images/2025-01-14-11-53-50_10347_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_10461_hand.jpg", + "images/2025-01-14-11-53-50_10464_hand.jpg", + "images/2025-01-14-11-53-50_10467_hand.jpg", + "images/2025-01-14-11-53-50_10470_hand.jpg", + "images/2025-01-14-11-53-50_10473_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_10504_hand.jpg", + "images/2025-01-14-11-53-50_10507_hand.jpg", + "images/2025-01-14-11-53-50_10510_hand.jpg", + "images/2025-01-14-11-53-50_10513_hand.jpg", + "images/2025-01-14-11-53-50_10516_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_10546_hand.jpg", + "images/2025-01-14-11-53-50_10549_hand.jpg", + "images/2025-01-14-11-53-50_10552_hand.jpg", + "images/2025-01-14-11-53-50_10555_hand.jpg", + "images/2025-01-14-11-53-50_10558_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_10684_hand.jpg", + "images/2025-01-14-11-53-50_10687_hand.jpg", + "images/2025-01-14-11-53-50_10690_hand.jpg", + "images/2025-01-14-11-53-50_10693_hand.jpg", + "images/2025-01-14-11-53-50_10696_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_10714_hand.jpg", + "images/2025-01-14-11-53-50_10717_hand.jpg", + "images/2025-01-14-11-53-50_10720_hand.jpg", + "images/2025-01-14-11-53-50_10723_hand.jpg", + "images/2025-01-14-11-53-50_10726_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_10745_hand.jpg", + "images/2025-01-14-11-53-50_10748_hand.jpg", + "images/2025-01-14-11-53-50_10751_hand.jpg", + "images/2025-01-14-11-53-50_10754_hand.jpg", + "images/2025-01-14-11-53-50_10757_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_10882_hand.jpg", + "images/2025-01-14-11-53-50_10885_hand.jpg", + "images/2025-01-14-11-53-50_10888_hand.jpg", + "images/2025-01-14-11-53-50_10891_hand.jpg", + "images/2025-01-14-11-53-50_10894_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_10924_hand.jpg", + "images/2025-01-14-11-53-50_10927_hand.jpg", + "images/2025-01-14-11-53-50_10930_hand.jpg", + "images/2025-01-14-11-53-50_10933_hand.jpg", + "images/2025-01-14-11-53-50_10936_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_10967_hand.jpg", + "images/2025-01-14-11-53-50_10970_hand.jpg", + "images/2025-01-14-11-53-50_10973_hand.jpg", + "images/2025-01-14-11-53-50_10976_hand.jpg", + "images/2025-01-14-11-53-50_10979_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_11444_hand.jpg", + "images/2025-01-14-11-53-50_11447_hand.jpg", + "images/2025-01-14-11-53-50_11450_hand.jpg", + "images/2025-01-14-11-53-50_11453_hand.jpg", + "images/2025-01-14-11-53-50_11456_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_11481_hand.jpg", + "images/2025-01-14-11-53-50_11484_hand.jpg", + "images/2025-01-14-11-53-50_11487_hand.jpg", + "images/2025-01-14-11-53-50_11490_hand.jpg", + "images/2025-01-14-11-53-50_11493_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_11518_hand.jpg", + "images/2025-01-14-11-53-50_11521_hand.jpg", + "images/2025-01-14-11-53-50_11524_hand.jpg", + "images/2025-01-14-11-53-50_11527_hand.jpg", + "images/2025-01-14-11-53-50_11530_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_11652_hand.jpg", + "images/2025-01-14-11-53-50_11655_hand.jpg", + "images/2025-01-14-11-53-50_11658_hand.jpg", + "images/2025-01-14-11-53-50_11661_hand.jpg", + "images/2025-01-14-11-53-50_11664_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_11685_hand.jpg", + "images/2025-01-14-11-53-50_11688_hand.jpg", + "images/2025-01-14-11-53-50_11691_hand.jpg", + "images/2025-01-14-11-53-50_11694_hand.jpg", + "images/2025-01-14-11-53-50_11697_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_11718_hand.jpg", + "images/2025-01-14-11-53-50_11721_hand.jpg", + "images/2025-01-14-11-53-50_11724_hand.jpg", + "images/2025-01-14-11-53-50_11727_hand.jpg", + "images/2025-01-14-11-53-50_11730_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_11859_hand.jpg", + "images/2025-01-14-11-53-50_11862_hand.jpg", + "images/2025-01-14-11-53-50_11865_hand.jpg", + "images/2025-01-14-11-53-50_11868_hand.jpg", + "images/2025-01-14-11-53-50_11871_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_11902_hand.jpg", + "images/2025-01-14-11-53-50_11905_hand.jpg", + "images/2025-01-14-11-53-50_11908_hand.jpg", + "images/2025-01-14-11-53-50_11911_hand.jpg", + "images/2025-01-14-11-53-50_11914_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_11944_hand.jpg", + "images/2025-01-14-11-53-50_11947_hand.jpg", + "images/2025-01-14-11-53-50_11950_hand.jpg", + "images/2025-01-14-11-53-50_11953_hand.jpg", + "images/2025-01-14-11-53-50_11956_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_12076_hand.jpg", + "images/2025-01-14-11-53-50_12079_hand.jpg", + "images/2025-01-14-11-53-50_12082_hand.jpg", + "images/2025-01-14-11-53-50_12085_hand.jpg", + "images/2025-01-14-11-53-50_12088_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_12104_hand.jpg", + "images/2025-01-14-11-53-50_12107_hand.jpg", + "images/2025-01-14-11-53-50_12110_hand.jpg", + "images/2025-01-14-11-53-50_12113_hand.jpg", + "images/2025-01-14-11-53-50_12116_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_12132_hand.jpg", + "images/2025-01-14-11-53-50_12135_hand.jpg", + "images/2025-01-14-11-53-50_12138_hand.jpg", + "images/2025-01-14-11-53-50_12141_hand.jpg", + "images/2025-01-14-11-53-50_12144_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_13392_hand.jpg", + "images/2025-01-14-11-53-50_13395_hand.jpg", + "images/2025-01-14-11-53-50_13398_hand.jpg", + "images/2025-01-14-11-53-50_13401_hand.jpg", + "images/2025-01-14-11-53-50_13404_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_13469_hand.jpg", + "images/2025-01-14-11-53-50_13472_hand.jpg", + "images/2025-01-14-11-53-50_13475_hand.jpg", + "images/2025-01-14-11-53-50_13478_hand.jpg", + "images/2025-01-14-11-53-50_13481_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_13546_hand.jpg", + "images/2025-01-14-11-53-50_13549_hand.jpg", + "images/2025-01-14-11-53-50_13552_hand.jpg", + "images/2025-01-14-11-53-50_13555_hand.jpg", + "images/2025-01-14-11-53-50_13558_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_13750_hand.jpg", + "images/2025-01-14-11-53-50_13753_hand.jpg", + "images/2025-01-14-11-53-50_13756_hand.jpg", + "images/2025-01-14-11-53-50_13759_hand.jpg", + "images/2025-01-14-11-53-50_13762_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_13782_hand.jpg", + "images/2025-01-14-11-53-50_13785_hand.jpg", + "images/2025-01-14-11-53-50_13788_hand.jpg", + "images/2025-01-14-11-53-50_13791_hand.jpg", + "images/2025-01-14-11-53-50_13794_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_13813_hand.jpg", + "images/2025-01-14-11-53-50_13816_hand.jpg", + "images/2025-01-14-11-53-50_13819_hand.jpg", + "images/2025-01-14-11-53-50_13822_hand.jpg", + "images/2025-01-14-11-53-50_13825_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_13982_hand.jpg", + "images/2025-01-14-11-53-50_13985_hand.jpg", + "images/2025-01-14-11-53-50_13988_hand.jpg", + "images/2025-01-14-11-53-50_13991_hand.jpg", + "images/2025-01-14-11-53-50_13994_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_14040_hand.jpg", + "images/2025-01-14-11-53-50_14043_hand.jpg", + "images/2025-01-14-11-53-50_14046_hand.jpg", + "images/2025-01-14-11-53-50_14049_hand.jpg", + "images/2025-01-14-11-53-50_14052_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_14098_hand.jpg", + "images/2025-01-14-11-53-50_14101_hand.jpg", + "images/2025-01-14-11-53-50_14104_hand.jpg", + "images/2025-01-14-11-53-50_14107_hand.jpg", + "images/2025-01-14-11-53-50_14110_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_14276_hand.jpg", + "images/2025-01-14-11-53-50_14279_hand.jpg", + "images/2025-01-14-11-53-50_14282_hand.jpg", + "images/2025-01-14-11-53-50_14285_hand.jpg", + "images/2025-01-14-11-53-50_14288_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. 
Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_14314_hand.jpg", + "images/2025-01-14-11-53-50_14317_hand.jpg", + "images/2025-01-14-11-53-50_14320_hand.jpg", + "images/2025-01-14-11-53-50_14323_hand.jpg", + "images/2025-01-14-11-53-50_14326_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_14351_hand.jpg", + "images/2025-01-14-11-53-50_14354_hand.jpg", + "images/2025-01-14-11-53-50_14357_hand.jpg", + "images/2025-01-14-11-53-50_14360_hand.jpg", + "images/2025-01-14-11-53-50_14363_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. 
Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Early stage (this manipulation step has just started)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_14544_hand.jpg", + "images/2025-01-14-11-53-50_14547_hand.jpg", + "images/2025-01-14-11-53-50_14550_hand.jpg", + "images/2025-01-14-11-53-50_14553_hand.jpg", + "images/2025-01-14-11-53-50_14556_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Middle stage (this manipulation step is about halfway complete)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_14610_hand.jpg", + "images/2025-01-14-11-53-50_14613_hand.jpg", + "images/2025-01-14-11-53-50_14616_hand.jpg", + "images/2025-01-14-11-53-50_14619_hand.jpg", + "images/2025-01-14-11-53-50_14622_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, how far along is the current manipulation step?\nOptions:\nA. Early stage (this manipulation step has just started)\nB. Middle stage (this manipulation step is about halfway complete)\nC. Late stage (this manipulation step is nearly finished)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Late stage (this manipulation step is nearly finished)" + } + ], + "images": [ + "images/2025-01-14-11-53-50_14675_hand.jpg", + "images/2025-01-14-11-53-50_14678_hand.jpg", + "images/2025-01-14-11-53-50_14681_hand.jpg", + "images/2025-01-14-11-53-50_14684_hand.jpg", + "images/2025-01-14-11-53-50_14687_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T_progress", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Grasp\nB. Push\nC. Align\nD. Nudge\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Grasp" + } + ], + "images": [ + "images/2025-01-14-11-53-50_163_hand.jpg", + "images/2025-01-14-11-53-50_166_hand.jpg", + "images/2025-01-14-11-53-50_169_hand.jpg", + "images/2025-01-14-11-53-50_172_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Push\nB. Align\nC. Lift\nD. Twist\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Lift" + } + ], + "images": [ + "images/2025-01-14-11-53-50_256_hand.jpg", + "images/2025-01-14-11-53-50_259_hand.jpg", + "images/2025-01-14-11-53-50_262_hand.jpg", + "images/2025-01-14-11-53-50_265_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Align\nB. Nudge\nC. Release\nD. Approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Approach" + } + ], + "images": [ + "images/2025-01-14-11-53-50_396_hand.jpg", + "images/2025-01-14-11-53-50_399_hand.jpg", + "images/2025-01-14-11-53-50_402_hand.jpg", + "images/2025-01-14-11-53-50_405_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Align\nB. Nudge\nC. Grasp\nD. Approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Align" + } + ], + "images": [ + "images/2025-01-14-11-53-50_632_hand.jpg", + "images/2025-01-14-11-53-50_635_hand.jpg", + "images/2025-01-14-11-53-50_638_hand.jpg", + "images/2025-01-14-11-53-50_641_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Approach\nB. Lift\nC. Release\nD. Push\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Release" + } + ], + "images": [ + "images/2025-01-14-11-53-50_747_hand.jpg", + "images/2025-01-14-11-53-50_750_hand.jpg", + "images/2025-01-14-11-53-50_753_hand.jpg", + "images/2025-01-14-11-53-50_756_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Grasp\nB. Approach\nC. Align\nD. Pull\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Grasp" + } + ], + "images": [ + "images/2025-01-14-11-53-50_871_hand.jpg", + "images/2025-01-14-11-53-50_874_hand.jpg", + "images/2025-01-14-11-53-50_877_hand.jpg", + "images/2025-01-14-11-53-50_880_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Approach\nB. Twist\nC. Lift\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Lift" + } + ], + "images": [ + "images/2025-01-14-11-53-50_999_hand.jpg", + "images/2025-01-14-11-53-50_1002_hand.jpg", + "images/2025-01-14-11-53-50_1005_hand.jpg", + "images/2025-01-14-11-53-50_1008_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Pull\nB. Approach\nC. Release\nD. Nudge\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Approach" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1078_hand.jpg", + "images/2025-01-14-11-53-50_1081_hand.jpg", + "images/2025-01-14-11-53-50_1084_hand.jpg", + "images/2025-01-14-11-53-50_1087_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Align\nB. Lift\nC. Pull\nD. Twist\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Align" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1193_hand.jpg", + "images/2025-01-14-11-53-50_1196_hand.jpg", + "images/2025-01-14-11-53-50_1199_hand.jpg", + "images/2025-01-14-11-53-50_1202_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Push\nB. Align\nC. Lift\nD. Twist\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Push" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1274_hand.jpg", + "images/2025-01-14-11-53-50_1277_hand.jpg", + "images/2025-01-14-11-53-50_1280_hand.jpg", + "images/2025-01-14-11-53-50_1283_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Push\nB. Pull\nC. Release\nD. Lift\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Release" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1311_hand.jpg", + "images/2025-01-14-11-53-50_1314_hand.jpg", + "images/2025-01-14-11-53-50_1317_hand.jpg", + "images/2025-01-14-11-53-50_1320_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Grasp\nB. Release\nC. Align\nD. Twist\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Grasp" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1384_hand.jpg", + "images/2025-01-14-11-53-50_1387_hand.jpg", + "images/2025-01-14-11-53-50_1390_hand.jpg", + "images/2025-01-14-11-53-50_1393_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Approach\nB. Grasp\nC. Push\nD. Lift\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Lift" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1466_hand.jpg", + "images/2025-01-14-11-53-50_1469_hand.jpg", + "images/2025-01-14-11-53-50_1472_hand.jpg", + "images/2025-01-14-11-53-50_1475_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Push\nB. Approach\nC. Twist\nD. Pull\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Approach" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1540_hand.jpg", + "images/2025-01-14-11-53-50_1543_hand.jpg", + "images/2025-01-14-11-53-50_1546_hand.jpg", + "images/2025-01-14-11-53-50_1549_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Pull\nB. Release\nC. Push\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Release" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1607_hand.jpg", + "images/2025-01-14-11-53-50_1610_hand.jpg", + "images/2025-01-14-11-53-50_1613_hand.jpg", + "images/2025-01-14-11-53-50_1616_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Twist\nB. Grasp\nC. Align\nD. Nudge\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Grasp" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1691_hand.jpg", + "images/2025-01-14-11-53-50_1694_hand.jpg", + "images/2025-01-14-11-53-50_1697_hand.jpg", + "images/2025-01-14-11-53-50_1700_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Approach\nB. Twist\nC. Push\nD. Lift\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Lift" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1776_hand.jpg", + "images/2025-01-14-11-53-50_1779_hand.jpg", + "images/2025-01-14-11-53-50_1782_hand.jpg", + "images/2025-01-14-11-53-50_1785_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Approach\nB. Push\nC. Pull\nD. Grasp\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Approach" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1891_hand.jpg", + "images/2025-01-14-11-53-50_1894_hand.jpg", + "images/2025-01-14-11-53-50_1897_hand.jpg", + "images/2025-01-14-11-53-50_1900_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Align\nB. Nudge\nC. Release\nD. Twist\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Align" + } + ], + "images": [ + "images/2025-01-14-11-53-50_2020_hand.jpg", + "images/2025-01-14-11-53-50_2023_hand.jpg", + "images/2025-01-14-11-53-50_2026_hand.jpg", + "images/2025-01-14-11-53-50_2029_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Twist\nB. Nudge\nC. Push\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Push" + } + ], + "images": [ + "images/2025-01-14-11-53-50_2117_hand.jpg", + "images/2025-01-14-11-53-50_2120_hand.jpg", + "images/2025-01-14-11-53-50_2123_hand.jpg", + "images/2025-01-14-11-53-50_2126_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Grasp\nB. Align\nC. Approach\nD. Nudge\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Grasp" + } + ], + "images": [ + "images/2025-01-14-11-53-50_2258_hand.jpg", + "images/2025-01-14-11-53-50_2261_hand.jpg", + "images/2025-01-14-11-53-50_2264_hand.jpg", + "images/2025-01-14-11-53-50_2267_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Push\nB. Lift\nC. Pull\nD. Twist\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Lift" + } + ], + "images": [ + "images/2025-01-14-11-53-50_2338_hand.jpg", + "images/2025-01-14-11-53-50_2341_hand.jpg", + "images/2025-01-14-11-53-50_2344_hand.jpg", + "images/2025-01-14-11-53-50_2347_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Approach\nB. Push\nC. Grasp\nD. Lift\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Approach" + } + ], + "images": [ + "images/2025-01-14-11-53-50_2400_hand.jpg", + "images/2025-01-14-11-53-50_2403_hand.jpg", + "images/2025-01-14-11-53-50_2406_hand.jpg", + "images/2025-01-14-11-53-50_2409_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Push\nB. Nudge\nC. Grasp\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Align" + } + ], + "images": [ + "images/2025-01-14-11-53-50_2576_hand.jpg", + "images/2025-01-14-11-53-50_2579_hand.jpg", + "images/2025-01-14-11-53-50_2582_hand.jpg", + "images/2025-01-14-11-53-50_2585_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Release\nB. Pull\nC. Push\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Push" + } + ], + "images": [ + "images/2025-01-14-11-53-50_2750_hand.jpg", + "images/2025-01-14-11-53-50_2753_hand.jpg", + "images/2025-01-14-11-53-50_2756_hand.jpg", + "images/2025-01-14-11-53-50_2759_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Lift\nB. Grasp\nC. Push\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Grasp" + } + ], + "images": [ + "images/2025-01-14-11-53-50_2891_hand.jpg", + "images/2025-01-14-11-53-50_2894_hand.jpg", + "images/2025-01-14-11-53-50_2897_hand.jpg", + "images/2025-01-14-11-53-50_2900_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Push\nB. Lift\nC. Approach\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Lift" + } + ], + "images": [ + "images/2025-01-14-11-53-50_2990_hand.jpg", + "images/2025-01-14-11-53-50_2993_hand.jpg", + "images/2025-01-14-11-53-50_2996_hand.jpg", + "images/2025-01-14-11-53-50_2999_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Align\nB. Lift\nC. Push\nD. Approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Approach" + } + ], + "images": [ + "images/2025-01-14-11-53-50_3102_hand.jpg", + "images/2025-01-14-11-53-50_3105_hand.jpg", + "images/2025-01-14-11-53-50_3108_hand.jpg", + "images/2025-01-14-11-53-50_3111_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Align\nB. Pull\nC. Twist\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Align" + } + ], + "images": [ + "images/2025-01-14-11-53-50_3366_hand.jpg", + "images/2025-01-14-11-53-50_3369_hand.jpg", + "images/2025-01-14-11-53-50_3372_hand.jpg", + "images/2025-01-14-11-53-50_3375_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Pull\nB. Nudge\nC. Push\nD. Approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Push" + } + ], + "images": [ + "images/2025-01-14-11-53-50_3589_hand.jpg", + "images/2025-01-14-11-53-50_3592_hand.jpg", + "images/2025-01-14-11-53-50_3595_hand.jpg", + "images/2025-01-14-11-53-50_3598_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Approach\nB. Pull\nC. Grasp\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Grasp" + } + ], + "images": [ + "images/2025-01-14-11-53-50_3682_hand.jpg", + "images/2025-01-14-11-53-50_3685_hand.jpg", + "images/2025-01-14-11-53-50_3688_hand.jpg", + "images/2025-01-14-11-53-50_3691_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Lift\nB. Approach\nC. Align\nD. Twist\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Align" + } + ], + "images": [ + "images/2025-01-14-11-53-50_3794_hand.jpg", + "images/2025-01-14-11-53-50_3797_hand.jpg", + "images/2025-01-14-11-53-50_3800_hand.jpg", + "images/2025-01-14-11-53-50_3803_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Release\nB. Grasp\nC. Lift\nD. Push\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Push" + } + ], + "images": [ + "images/2025-01-14-11-53-50_3909_hand.jpg", + "images/2025-01-14-11-53-50_3912_hand.jpg", + "images/2025-01-14-11-53-50_3915_hand.jpg", + "images/2025-01-14-11-53-50_3918_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Twist\nB. Grasp\nC. Release\nD. Nudge\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Grasp" + } + ], + "images": [ + "images/2025-01-14-11-53-50_4047_hand.jpg", + "images/2025-01-14-11-53-50_4050_hand.jpg", + "images/2025-01-14-11-53-50_4053_hand.jpg", + "images/2025-01-14-11-53-50_4056_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Lift\nB. Pull\nC. Align\nD. Push\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Lift" + } + ], + "images": [ + "images/2025-01-14-11-53-50_4150_hand.jpg", + "images/2025-01-14-11-53-50_4153_hand.jpg", + "images/2025-01-14-11-53-50_4156_hand.jpg", + "images/2025-01-14-11-53-50_4159_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Nudge\nB. Twist\nC. Lift\nD. Approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Approach" + } + ], + "images": [ + "images/2025-01-14-11-53-50_4260_hand.jpg", + "images/2025-01-14-11-53-50_4263_hand.jpg", + "images/2025-01-14-11-53-50_4266_hand.jpg", + "images/2025-01-14-11-53-50_4269_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Pull\nB. Release\nC. Nudge\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Align" + } + ], + "images": [ + "images/2025-01-14-11-53-50_4506_hand.jpg", + "images/2025-01-14-11-53-50_4509_hand.jpg", + "images/2025-01-14-11-53-50_4512_hand.jpg", + "images/2025-01-14-11-53-50_4515_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Release\nB. Pull\nC. Align\nD. Twist\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Twist" + } + ], + "images": [ + "images/2025-01-14-11-53-50_4732_hand.jpg", + "images/2025-01-14-11-53-50_4735_hand.jpg", + "images/2025-01-14-11-53-50_4738_hand.jpg", + "images/2025-01-14-11-53-50_4741_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Approach\nB. Push\nC. Nudge\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Release" + } + ], + "images": [ + "images/2025-01-14-11-53-50_4826_hand.jpg", + "images/2025-01-14-11-53-50_4829_hand.jpg", + "images/2025-01-14-11-53-50_4832_hand.jpg", + "images/2025-01-14-11-53-50_4835_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Approach\nB. Grasp\nC. Twist\nD. Push\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Grasp" + } + ], + "images": [ + "images/2025-01-14-11-53-50_4944_hand.jpg", + "images/2025-01-14-11-53-50_4947_hand.jpg", + "images/2025-01-14-11-53-50_4950_hand.jpg", + "images/2025-01-14-11-53-50_4953_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Grasp\nB. Pull\nC. Nudge\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Grasp" + } + ], + "images": [ + "images/2025-01-14-11-53-50_5106_hand.jpg", + "images/2025-01-14-11-53-50_5109_hand.jpg", + "images/2025-01-14-11-53-50_5112_hand.jpg", + "images/2025-01-14-11-53-50_5115_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Align\nB. Push\nC. Lift\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Lift" + } + ], + "images": [ + "images/2025-01-14-11-53-50_5174_hand.jpg", + "images/2025-01-14-11-53-50_5177_hand.jpg", + "images/2025-01-14-11-53-50_5180_hand.jpg", + "images/2025-01-14-11-53-50_5183_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Approach\nB. Release\nC. Nudge\nD. Pull\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Approach" + } + ], + "images": [ + "images/2025-01-14-11-53-50_5257_hand.jpg", + "images/2025-01-14-11-53-50_5260_hand.jpg", + "images/2025-01-14-11-53-50_5263_hand.jpg", + "images/2025-01-14-11-53-50_5266_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Align\nB. Release\nC. Nudge\nD. Pull\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Align" + } + ], + "images": [ + "images/2025-01-14-11-53-50_5488_hand.jpg", + "images/2025-01-14-11-53-50_5491_hand.jpg", + "images/2025-01-14-11-53-50_5494_hand.jpg", + "images/2025-01-14-11-53-50_5497_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Push\nB. Approach\nC. Lift\nD. Nudge\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Approach" + } + ], + "images": [ + "images/2025-01-14-11-53-50_5730_hand.jpg", + "images/2025-01-14-11-53-50_5733_hand.jpg", + "images/2025-01-14-11-53-50_5736_hand.jpg", + "images/2025-01-14-11-53-50_5739_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Grasp\nB. Nudge\nC. Lift\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Align" + } + ], + "images": [ + "images/2025-01-14-11-53-50_5851_hand.jpg", + "images/2025-01-14-11-53-50_5854_hand.jpg", + "images/2025-01-14-11-53-50_5857_hand.jpg", + "images/2025-01-14-11-53-50_5860_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Lift\nB. Release\nC. Align\nD. Pull\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Release" + } + ], + "images": [ + "images/2025-01-14-11-53-50_5906_hand.jpg", + "images/2025-01-14-11-53-50_5909_hand.jpg", + "images/2025-01-14-11-53-50_5912_hand.jpg", + "images/2025-01-14-11-53-50_5915_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Push\nB. Grasp\nC. Lift\nD. Twist\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Grasp" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6002_hand.jpg", + "images/2025-01-14-11-53-50_6005_hand.jpg", + "images/2025-01-14-11-53-50_6008_hand.jpg", + "images/2025-01-14-11-53-50_6011_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Grasp\nB. Push\nC. Twist\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Grasp" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6136_hand.jpg", + "images/2025-01-14-11-53-50_6139_hand.jpg", + "images/2025-01-14-11-53-50_6142_hand.jpg", + "images/2025-01-14-11-53-50_6145_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Twist\nB. Lift\nC. Release\nD. Approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Lift" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6203_hand.jpg", + "images/2025-01-14-11-53-50_6206_hand.jpg", + "images/2025-01-14-11-53-50_6209_hand.jpg", + "images/2025-01-14-11-53-50_6212_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Push\nB. Approach\nC. Grasp\nD. Nudge\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Approach" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6313_hand.jpg", + "images/2025-01-14-11-53-50_6316_hand.jpg", + "images/2025-01-14-11-53-50_6319_hand.jpg", + "images/2025-01-14-11-53-50_6322_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Approach\nB. Align\nC. Lift\nD. Push\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Align" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6430_hand.jpg", + "images/2025-01-14-11-53-50_6433_hand.jpg", + "images/2025-01-14-11-53-50_6436_hand.jpg", + "images/2025-01-14-11-53-50_6439_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Grasp\nB. Lift\nC. Release\nD. Twist\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Release" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6466_hand.jpg", + "images/2025-01-14-11-53-50_6469_hand.jpg", + "images/2025-01-14-11-53-50_6472_hand.jpg", + "images/2025-01-14-11-53-50_6475_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Align\nB. Grasp\nC. Push\nD. Nudge\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Grasp" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6565_hand.jpg", + "images/2025-01-14-11-53-50_6568_hand.jpg", + "images/2025-01-14-11-53-50_6571_hand.jpg", + "images/2025-01-14-11-53-50_6574_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Pull\nB. Lift\nC. Grasp\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Lift" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6670_hand.jpg", + "images/2025-01-14-11-53-50_6673_hand.jpg", + "images/2025-01-14-11-53-50_6676_hand.jpg", + "images/2025-01-14-11-53-50_6679_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Release\nB. Approach\nC. Twist\nD. Lift\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Approach" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6786_hand.jpg", + "images/2025-01-14-11-53-50_6789_hand.jpg", + "images/2025-01-14-11-53-50_6792_hand.jpg", + "images/2025-01-14-11-53-50_6795_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Align\nB. Nudge\nC. Twist\nD. Push\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Align" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6942_hand.jpg", + "images/2025-01-14-11-53-50_6945_hand.jpg", + "images/2025-01-14-11-53-50_6948_hand.jpg", + "images/2025-01-14-11-53-50_6951_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Grasp\nB. Approach\nC. Release\nD. Nudge\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Release" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7010_hand.jpg", + "images/2025-01-14-11-53-50_7013_hand.jpg", + "images/2025-01-14-11-53-50_7016_hand.jpg", + "images/2025-01-14-11-53-50_7019_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Grasp\nB. Release\nC. Push\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Grasp" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7108_hand.jpg", + "images/2025-01-14-11-53-50_7111_hand.jpg", + "images/2025-01-14-11-53-50_7114_hand.jpg", + "images/2025-01-14-11-53-50_7117_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Twist\nB. Align\nC. Lift\nD. Pull\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Lift" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7202_hand.jpg", + "images/2025-01-14-11-53-50_7205_hand.jpg", + "images/2025-01-14-11-53-50_7208_hand.jpg", + "images/2025-01-14-11-53-50_7211_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Align\nB. Nudge\nC. Approach\nD. Lift\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Approach" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7270_hand.jpg", + "images/2025-01-14-11-53-50_7273_hand.jpg", + "images/2025-01-14-11-53-50_7276_hand.jpg", + "images/2025-01-14-11-53-50_7279_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Grasp\nB. Pull\nC. Lift\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Release" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7336_hand.jpg", + "images/2025-01-14-11-53-50_7339_hand.jpg", + "images/2025-01-14-11-53-50_7342_hand.jpg", + "images/2025-01-14-11-53-50_7345_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Grasp\nB. Release\nC. Approach\nD. Twist\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Grasp" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7406_hand.jpg", + "images/2025-01-14-11-53-50_7409_hand.jpg", + "images/2025-01-14-11-53-50_7412_hand.jpg", + "images/2025-01-14-11-53-50_7415_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Align\nB. Twist\nC. Pull\nD. Lift\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Lift" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7476_hand.jpg", + "images/2025-01-14-11-53-50_7479_hand.jpg", + "images/2025-01-14-11-53-50_7482_hand.jpg", + "images/2025-01-14-11-53-50_7485_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Lift\nB. Grasp\nC. Nudge\nD. Approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Approach" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7534_hand.jpg", + "images/2025-01-14-11-53-50_7537_hand.jpg", + "images/2025-01-14-11-53-50_7540_hand.jpg", + "images/2025-01-14-11-53-50_7543_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Pull\nB. Release\nC. Nudge\nD. Twist\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Release" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7588_hand.jpg", + "images/2025-01-14-11-53-50_7591_hand.jpg", + "images/2025-01-14-11-53-50_7594_hand.jpg", + "images/2025-01-14-11-53-50_7597_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Grasp\nB. Nudge\nC. Pull\nD. Twist\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Grasp" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7653_hand.jpg", + "images/2025-01-14-11-53-50_7656_hand.jpg", + "images/2025-01-14-11-53-50_7659_hand.jpg", + "images/2025-01-14-11-53-50_7662_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Align\nB. Lift\nC. Nudge\nD. Push\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Lift" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7720_hand.jpg", + "images/2025-01-14-11-53-50_7723_hand.jpg", + "images/2025-01-14-11-53-50_7726_hand.jpg", + "images/2025-01-14-11-53-50_7729_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Nudge\nB. Twist\nC. Align\nD. Approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Approach" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7812_hand.jpg", + "images/2025-01-14-11-53-50_7815_hand.jpg", + "images/2025-01-14-11-53-50_7818_hand.jpg", + "images/2025-01-14-11-53-50_7821_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Align\nB. Lift\nC. Nudge\nD. Approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Align" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7926_hand.jpg", + "images/2025-01-14-11-53-50_7929_hand.jpg", + "images/2025-01-14-11-53-50_7932_hand.jpg", + "images/2025-01-14-11-53-50_7935_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Grasp\nB. Lift\nC. Approach\nD. Push\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Push" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7972_hand.jpg", + "images/2025-01-14-11-53-50_7975_hand.jpg", + "images/2025-01-14-11-53-50_7978_hand.jpg", + "images/2025-01-14-11-53-50_7981_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Approach\nB. Twist\nC. Release\nD. Grasp\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Release" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7999_hand.jpg", + "images/2025-01-14-11-53-50_8002_hand.jpg", + "images/2025-01-14-11-53-50_8005_hand.jpg", + "images/2025-01-14-11-53-50_8008_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Twist\nB. Grasp\nC. Approach\nD. Push\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Grasp" + } + ], + "images": [ + "images/2025-01-14-11-53-50_8080_hand.jpg", + "images/2025-01-14-11-53-50_8083_hand.jpg", + "images/2025-01-14-11-53-50_8086_hand.jpg", + "images/2025-01-14-11-53-50_8089_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Approach\nB. Pull\nC. Align\nD. Lift\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Lift" + } + ], + "images": [ + "images/2025-01-14-11-53-50_8160_hand.jpg", + "images/2025-01-14-11-53-50_8163_hand.jpg", + "images/2025-01-14-11-53-50_8166_hand.jpg", + "images/2025-01-14-11-53-50_8169_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Push\nB. Approach\nC. Grasp\nD. Pull\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Approach" + } + ], + "images": [ + "images/2025-01-14-11-53-50_8228_hand.jpg", + "images/2025-01-14-11-53-50_8231_hand.jpg", + "images/2025-01-14-11-53-50_8234_hand.jpg", + "images/2025-01-14-11-53-50_8237_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Push\nB. Align\nC. Twist\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Release" + } + ], + "images": [ + "images/2025-01-14-11-53-50_8300_hand.jpg", + "images/2025-01-14-11-53-50_8303_hand.jpg", + "images/2025-01-14-11-53-50_8306_hand.jpg", + "images/2025-01-14-11-53-50_8309_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Twist\nB. Grasp\nC. Approach\nD. Nudge\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Grasp" + } + ], + "images": [ + "images/2025-01-14-11-53-50_8384_hand.jpg", + "images/2025-01-14-11-53-50_8387_hand.jpg", + "images/2025-01-14-11-53-50_8390_hand.jpg", + "images/2025-01-14-11-53-50_8393_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Approach\nB. Release\nC. Lift\nD. Twist\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Lift" + } + ], + "images": [ + "images/2025-01-14-11-53-50_8458_hand.jpg", + "images/2025-01-14-11-53-50_8461_hand.jpg", + "images/2025-01-14-11-53-50_8464_hand.jpg", + "images/2025-01-14-11-53-50_8467_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Release\nB. Push\nC. Approach\nD. Twist\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Approach" + } + ], + "images": [ + "images/2025-01-14-11-53-50_8582_hand.jpg", + "images/2025-01-14-11-53-50_8585_hand.jpg", + "images/2025-01-14-11-53-50_8588_hand.jpg", + "images/2025-01-14-11-53-50_8591_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Lift\nB. Grasp\nC. Align\nD. Pull\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Align" + } + ], + "images": [ + "images/2025-01-14-11-53-50_8850_hand.jpg", + "images/2025-01-14-11-53-50_8853_hand.jpg", + "images/2025-01-14-11-53-50_8856_hand.jpg", + "images/2025-01-14-11-53-50_8859_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Lift\nB. Release\nC. Nudge\nD. Push\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Push" + } + ], + "images": [ + "images/2025-01-14-11-53-50_9096_hand.jpg", + "images/2025-01-14-11-53-50_9099_hand.jpg", + "images/2025-01-14-11-53-50_9102_hand.jpg", + "images/2025-01-14-11-53-50_9105_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Approach\nB. Release\nC. Grasp\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Grasp" + } + ], + "images": [ + "images/2025-01-14-11-53-50_9325_hand.jpg", + "images/2025-01-14-11-53-50_9328_hand.jpg", + "images/2025-01-14-11-53-50_9331_hand.jpg", + "images/2025-01-14-11-53-50_9334_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Pull\nB. Lift\nC. Grasp\nD. Approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Lift" + } + ], + "images": [ + "images/2025-01-14-11-53-50_9472_hand.jpg", + "images/2025-01-14-11-53-50_9475_hand.jpg", + "images/2025-01-14-11-53-50_9478_hand.jpg", + "images/2025-01-14-11-53-50_9481_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Lift\nB. Release\nC. Grasp\nD. Approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Approach" + } + ], + "images": [ + "images/2025-01-14-11-53-50_9544_hand.jpg", + "images/2025-01-14-11-53-50_9547_hand.jpg", + "images/2025-01-14-11-53-50_9550_hand.jpg", + "images/2025-01-14-11-53-50_9553_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Push\nB. Release\nC. Twist\nD. Grasp\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Release" + } + ], + "images": [ + "images/2025-01-14-11-53-50_9612_hand.jpg", + "images/2025-01-14-11-53-50_9615_hand.jpg", + "images/2025-01-14-11-53-50_9618_hand.jpg", + "images/2025-01-14-11-53-50_9621_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Nudge\nB. Align\nC. Push\nD. Grasp\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Grasp" + } + ], + "images": [ + "images/2025-01-14-11-53-50_9704_hand.jpg", + "images/2025-01-14-11-53-50_9707_hand.jpg", + "images/2025-01-14-11-53-50_9710_hand.jpg", + "images/2025-01-14-11-53-50_9713_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Align\nB. Approach\nC. Nudge\nD. Lift\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Lift" + } + ], + "images": [ + "images/2025-01-14-11-53-50_9798_hand.jpg", + "images/2025-01-14-11-53-50_9801_hand.jpg", + "images/2025-01-14-11-53-50_9804_hand.jpg", + "images/2025-01-14-11-53-50_9807_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Lift\nB. Pull\nC. Approach\nD. Nudge\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Approach" + } + ], + "images": [ + "images/2025-01-14-11-53-50_9961_hand.jpg", + "images/2025-01-14-11-53-50_9964_hand.jpg", + "images/2025-01-14-11-53-50_9967_hand.jpg", + "images/2025-01-14-11-53-50_9970_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Pull\nB. Nudge\nC. Lift\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Align" + } + ], + "images": [ + "images/2025-01-14-11-53-50_10177_hand.jpg", + "images/2025-01-14-11-53-50_10180_hand.jpg", + "images/2025-01-14-11-53-50_10183_hand.jpg", + "images/2025-01-14-11-53-50_10186_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Approach\nB. Release\nC. Push\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Approach" + } + ], + "images": [ + "images/2025-01-14-11-53-50_10304_hand.jpg", + "images/2025-01-14-11-53-50_10307_hand.jpg", + "images/2025-01-14-11-53-50_10310_hand.jpg", + "images/2025-01-14-11-53-50_10313_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Grasp\nB. Release\nC. Approach\nD. Twist\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Release" + } + ], + "images": [ + "images/2025-01-14-11-53-50_10374_hand.jpg", + "images/2025-01-14-11-53-50_10377_hand.jpg", + "images/2025-01-14-11-53-50_10380_hand.jpg", + "images/2025-01-14-11-53-50_10383_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Twist\nB. Nudge\nC. Lift\nD. Grasp\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Grasp" + } + ], + "images": [ + "images/2025-01-14-11-53-50_10490_hand.jpg", + "images/2025-01-14-11-53-50_10493_hand.jpg", + "images/2025-01-14-11-53-50_10496_hand.jpg", + "images/2025-01-14-11-53-50_10499_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Align\nB. Nudge\nC. Lift\nD. Pull\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Lift" + } + ], + "images": [ + "images/2025-01-14-11-53-50_10614_hand.jpg", + "images/2025-01-14-11-53-50_10617_hand.jpg", + "images/2025-01-14-11-53-50_10620_hand.jpg", + "images/2025-01-14-11-53-50_10623_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Approach\nB. Twist\nC. Lift\nD. Pull\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Approach" + } + ], + "images": [ + "images/2025-01-14-11-53-50_10691_hand.jpg", + "images/2025-01-14-11-53-50_10694_hand.jpg", + "images/2025-01-14-11-53-50_10697_hand.jpg", + "images/2025-01-14-11-53-50_10700_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Twist\nB. Push\nC. Align\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Align" + } + ], + "images": [ + "images/2025-01-14-11-53-50_10771_hand.jpg", + "images/2025-01-14-11-53-50_10774_hand.jpg", + "images/2025-01-14-11-53-50_10777_hand.jpg", + "images/2025-01-14-11-53-50_10780_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Align\nB. Release\nC. Pull\nD. Grasp\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Release" + } + ], + "images": [ + "images/2025-01-14-11-53-50_10794_hand.jpg", + "images/2025-01-14-11-53-50_10797_hand.jpg", + "images/2025-01-14-11-53-50_10800_hand.jpg", + "images/2025-01-14-11-53-50_10803_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Lift\nB. Align\nC. Nudge\nD. Grasp\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Grasp" + } + ], + "images": [ + "images/2025-01-14-11-53-50_10906_hand.jpg", + "images/2025-01-14-11-53-50_10909_hand.jpg", + "images/2025-01-14-11-53-50_10912_hand.jpg", + "images/2025-01-14-11-53-50_10915_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Lift\nB. Align\nC. Twist\nD. Approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Lift" + } + ], + "images": [ + "images/2025-01-14-11-53-50_11028_hand.jpg", + "images/2025-01-14-11-53-50_11031_hand.jpg", + "images/2025-01-14-11-53-50_11034_hand.jpg", + "images/2025-01-14-11-53-50_11037_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Twist\nB. Approach\nC. Align\nD. Pull\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Approach" + } + ], + "images": [ + "images/2025-01-14-11-53-50_11168_hand.jpg", + "images/2025-01-14-11-53-50_11171_hand.jpg", + "images/2025-01-14-11-53-50_11174_hand.jpg", + "images/2025-01-14-11-53-50_11177_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Release\nB. Push\nC. Nudge\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Align" + } + ], + "images": [ + "images/2025-01-14-11-53-50_11330_hand.jpg", + "images/2025-01-14-11-53-50_11333_hand.jpg", + "images/2025-01-14-11-53-50_11336_hand.jpg", + "images/2025-01-14-11-53-50_11339_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Lift\nB. Grasp\nC. Nudge\nD. Approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Approach" + } + ], + "images": [ + "images/2025-01-14-11-53-50_11458_hand.jpg", + "images/2025-01-14-11-53-50_11461_hand.jpg", + "images/2025-01-14-11-53-50_11464_hand.jpg", + "images/2025-01-14-11-53-50_11467_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Approach\nB. Pull\nC. Align\nD. Lift\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Align" + } + ], + "images": [ + "images/2025-01-14-11-53-50_11557_hand.jpg", + "images/2025-01-14-11-53-50_11560_hand.jpg", + "images/2025-01-14-11-53-50_11563_hand.jpg", + "images/2025-01-14-11-53-50_11566_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Nudge\nB. Push\nC. Release\nD. Twist\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Release" + } + ], + "images": [ + "images/2025-01-14-11-53-50_11580_hand.jpg", + "images/2025-01-14-11-53-50_11583_hand.jpg", + "images/2025-01-14-11-53-50_11586_hand.jpg", + "images/2025-01-14-11-53-50_11589_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Twist\nB. Grasp\nC. Align\nD. Lift\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Grasp" + } + ], + "images": [ + "images/2025-01-14-11-53-50_11674_hand.jpg", + "images/2025-01-14-11-53-50_11677_hand.jpg", + "images/2025-01-14-11-53-50_11680_hand.jpg", + "images/2025-01-14-11-53-50_11683_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Pull\nB. Grasp\nC. Lift\nD. Twist\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Lift" + } + ], + "images": [ + "images/2025-01-14-11-53-50_11769_hand.jpg", + "images/2025-01-14-11-53-50_11772_hand.jpg", + "images/2025-01-14-11-53-50_11775_hand.jpg", + "images/2025-01-14-11-53-50_11778_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Nudge\nB. Grasp\nC. Release\nD. Approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Approach" + } + ], + "images": [ + "images/2025-01-14-11-53-50_11864_hand.jpg", + "images/2025-01-14-11-53-50_11867_hand.jpg", + "images/2025-01-14-11-53-50_11870_hand.jpg", + "images/2025-01-14-11-53-50_11873_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Nudge\nB. Align\nC. Grasp\nD. Approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Align" + } + ], + "images": [ + "images/2025-01-14-11-53-50_11975_hand.jpg", + "images/2025-01-14-11-53-50_11978_hand.jpg", + "images/2025-01-14-11-53-50_11981_hand.jpg", + "images/2025-01-14-11-53-50_11984_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Nudge\nB. Grasp\nC. Push\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Release" + } + ], + "images": [ + "images/2025-01-14-11-53-50_12012_hand.jpg", + "images/2025-01-14-11-53-50_12015_hand.jpg", + "images/2025-01-14-11-53-50_12018_hand.jpg", + "images/2025-01-14-11-53-50_12021_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Align\nB. Push\nC. Grasp\nD. Lift\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Grasp" + } + ], + "images": [ + "images/2025-01-14-11-53-50_12094_hand.jpg", + "images/2025-01-14-11-53-50_12097_hand.jpg", + "images/2025-01-14-11-53-50_12100_hand.jpg", + "images/2025-01-14-11-53-50_12103_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Pull\nB. Nudge\nC. Lift\nD. Approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Lift" + } + ], + "images": [ + "images/2025-01-14-11-53-50_12174_hand.jpg", + "images/2025-01-14-11-53-50_12177_hand.jpg", + "images/2025-01-14-11-53-50_12180_hand.jpg", + "images/2025-01-14-11-53-50_12183_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Approach\nB. Align\nC. Pull\nD. Lift\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Approach" + } + ], + "images": [ + "images/2025-01-14-11-53-50_12316_hand.jpg", + "images/2025-01-14-11-53-50_12319_hand.jpg", + "images/2025-01-14-11-53-50_12322_hand.jpg", + "images/2025-01-14-11-53-50_12325_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Twist\nB. Release\nC. Align\nD. Lift\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Align" + } + ], + "images": [ + "images/2025-01-14-11-53-50_12848_hand.jpg", + "images/2025-01-14-11-53-50_12851_hand.jpg", + "images/2025-01-14-11-53-50_12854_hand.jpg", + "images/2025-01-14-11-53-50_12857_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Pull\nB. Release\nC. Twist\nD. Approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Approach" + } + ], + "images": [ + "images/2025-01-14-11-53-50_13362_hand.jpg", + "images/2025-01-14-11-53-50_13365_hand.jpg", + "images/2025-01-14-11-53-50_13368_hand.jpg", + "images/2025-01-14-11-53-50_13371_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Twist\nB. Lift\nC. Align\nD. Nudge\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Align" + } + ], + "images": [ + "images/2025-01-14-11-53-50_13564_hand.jpg", + "images/2025-01-14-11-53-50_13567_hand.jpg", + "images/2025-01-14-11-53-50_13570_hand.jpg", + "images/2025-01-14-11-53-50_13573_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Release\nB. Grasp\nC. Pull\nD. Lift\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Release" + } + ], + "images": [ + "images/2025-01-14-11-53-50_13671_hand.jpg", + "images/2025-01-14-11-53-50_13674_hand.jpg", + "images/2025-01-14-11-53-50_13677_hand.jpg", + "images/2025-01-14-11-53-50_13680_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Release\nB. Grasp\nC. Pull\nD. Lift\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Grasp" + } + ], + "images": [ + "images/2025-01-14-11-53-50_13770_hand.jpg", + "images/2025-01-14-11-53-50_13773_hand.jpg", + "images/2025-01-14-11-53-50_13776_hand.jpg", + "images/2025-01-14-11-53-50_13779_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Grasp\nB. Align\nC. Lift\nD. Pull\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Lift" + } + ], + "images": [ + "images/2025-01-14-11-53-50_13860_hand.jpg", + "images/2025-01-14-11-53-50_13863_hand.jpg", + "images/2025-01-14-11-53-50_13866_hand.jpg", + "images/2025-01-14-11-53-50_13869_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Approach\nB. Lift\nC. Release\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Approach" + } + ], + "images": [ + "images/2025-01-14-11-53-50_13980_hand.jpg", + "images/2025-01-14-11-53-50_13983_hand.jpg", + "images/2025-01-14-11-53-50_13986_hand.jpg", + "images/2025-01-14-11-53-50_13989_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Twist\nB. Pull\nC. Align\nD. Approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Align" + } + ], + "images": [ + "images/2025-01-14-11-53-50_14138_hand.jpg", + "images/2025-01-14-11-53-50_14141_hand.jpg", + "images/2025-01-14-11-53-50_14144_hand.jpg", + "images/2025-01-14-11-53-50_14147_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Release\nB. Twist\nC. Pull\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Release" + } + ], + "images": [ + "images/2025-01-14-11-53-50_14199_hand.jpg", + "images/2025-01-14-11-53-50_14202_hand.jpg", + "images/2025-01-14-11-53-50_14205_hand.jpg", + "images/2025-01-14-11-53-50_14208_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Grasp\nB. Twist\nC. Approach\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Grasp" + } + ], + "images": [ + "images/2025-01-14-11-53-50_14306_hand.jpg", + "images/2025-01-14-11-53-50_14309_hand.jpg", + "images/2025-01-14-11-53-50_14312_hand.jpg", + "images/2025-01-14-11-53-50_14315_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Lift\nB. Approach\nC. Twist\nD. Nudge\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Lift" + } + ], + "images": [ + "images/2025-01-14-11-53-50_14414_hand.jpg", + "images/2025-01-14-11-53-50_14417_hand.jpg", + "images/2025-01-14-11-53-50_14420_hand.jpg", + "images/2025-01-14-11-53-50_14423_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Align\nB. Grasp\nC. Pull\nD. Approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Approach" + } + ], + "images": [ + "images/2025-01-14-11-53-50_14497_hand.jpg", + "images/2025-01-14-11-53-50_14500_hand.jpg", + "images/2025-01-14-11-53-50_14503_hand.jpg", + "images/2025-01-14-11-53-50_14506_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Pull\nB. Push\nC. Align\nD. Approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Align" + } + ], + "images": [ + "images/2025-01-14-11-53-50_14610_hand.jpg", + "images/2025-01-14-11-53-50_14613_hand.jpg", + "images/2025-01-14-11-53-50_14616_hand.jpg", + "images/2025-01-14-11-53-50_14619_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Align\nB. Approach\nC. Lift\nD. Twist\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Twist" + } + ], + "images": [ + "images/2025-01-14-11-53-50_14712_hand.jpg", + "images/2025-01-14-11-53-50_14715_hand.jpg", + "images/2025-01-14-11-53-50_14718_hand.jpg", + "images/2025-01-14-11-53-50_14721_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWhat is the current low-level action in these ordered frames?\nOptions:\nA. Nudge\nB. Pull\nC. Release\nD. Lift\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Release" + } + ], + "images": [ + "images/2025-01-14-11-53-50_14786_hand.jpg", + "images/2025-01-14-11-53-50_14789_hand.jpg", + "images/2025-01-14-11-53-50_14792_hand.jpg", + "images/2025-01-14-11-53-50_14795_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T10", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Pick USB', what low-level action should happen next?\nOptions:\nA. Push\nB. Twist\nC. Nudge\nD. Lift\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Lift" + } + ], + "images": [ + "images/2025-01-14-11-53-50_192_hand.jpg", + "images/2025-01-14-11-53-50_195_hand.jpg", + "images/2025-01-14-11-53-50_198_hand.jpg", + "images/2025-01-14-11-53-50_201_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert USB', what low-level action should happen next?\nOptions:\nA. Grasp\nB. Twist\nC. Nudge\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Align" + } + ], + "images": [ + "images/2025-01-14-11-53-50_440_hand.jpg", + "images/2025-01-14-11-53-50_443_hand.jpg", + "images/2025-01-14-11-53-50_446_hand.jpg", + "images/2025-01-14-11-53-50_449_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert USB', what low-level action should happen next?\nOptions:\nA. Push\nB. Grasp\nC. Release\nD. Nudge\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Release" + } + ], + "images": [ + "images/2025-01-14-11-53-50_669_hand.jpg", + "images/2025-01-14-11-53-50_672_hand.jpg", + "images/2025-01-14-11-53-50_675_hand.jpg", + "images/2025-01-14-11-53-50_678_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Pick Ethernet', what low-level action should happen next?\nOptions:\nA. Align\nB. Lift\nC. Approach\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Lift" + } + ], + "images": [ + "images/2025-01-14-11-53-50_911_hand.jpg", + "images/2025-01-14-11-53-50_914_hand.jpg", + "images/2025-01-14-11-53-50_917_hand.jpg", + "images/2025-01-14-11-53-50_920_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert Ethernet', what low-level action should happen next?\nOptions:\nA. Push\nB. Nudge\nC. Align\nD. Pull\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Align" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1101_hand.jpg", + "images/2025-01-14-11-53-50_1104_hand.jpg", + "images/2025-01-14-11-53-50_1107_hand.jpg", + "images/2025-01-14-11-53-50_1110_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert Ethernet', what low-level action should happen next?\nOptions:\nA. Nudge\nB. Push\nC. Release\nD. Pull\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Push" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1210_hand.jpg", + "images/2025-01-14-11-53-50_1213_hand.jpg", + "images/2025-01-14-11-53-50_1216_hand.jpg", + "images/2025-01-14-11-53-50_1219_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert Ethernet', what low-level action should happen next?\nOptions:\nA. Lift\nB. Grasp\nC. Release\nD. Pull\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Release" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1284_hand.jpg", + "images/2025-01-14-11-53-50_1287_hand.jpg", + "images/2025-01-14-11-53-50_1290_hand.jpg", + "images/2025-01-14-11-53-50_1293_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Pick small gear', what low-level action should happen next?\nOptions:\nA. Nudge\nB. Lift\nC. Release\nD. Twist\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Lift" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1408_hand.jpg", + "images/2025-01-14-11-53-50_1411_hand.jpg", + "images/2025-01-14-11-53-50_1414_hand.jpg", + "images/2025-01-14-11-53-50_1417_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Place small gear', what low-level action should happen next?\nOptions:\nA. Pull\nB. Release\nC. Grasp\nD. Twist\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Release" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1560_hand.jpg", + "images/2025-01-14-11-53-50_1563_hand.jpg", + "images/2025-01-14-11-53-50_1566_hand.jpg", + "images/2025-01-14-11-53-50_1569_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Pick small gear', what low-level action should happen next?\nOptions:\nA. Approach\nB. Lift\nC. Pull\nD. Nudge\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Lift" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1716_hand.jpg", + "images/2025-01-14-11-53-50_1719_hand.jpg", + "images/2025-01-14-11-53-50_1722_hand.jpg", + "images/2025-01-14-11-53-50_1725_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert small gear', what low-level action should happen next?\nOptions:\nA. Align\nB. Push\nC. Twist\nD. Pull\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Align" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1926_hand.jpg", + "images/2025-01-14-11-53-50_1929_hand.jpg", + "images/2025-01-14-11-53-50_1932_hand.jpg", + "images/2025-01-14-11-53-50_1935_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert small gear', what low-level action should happen next?\nOptions:\nA. Approach\nB. Push\nC. Pull\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Push" + } + ], + "images": [ + "images/2025-01-14-11-53-50_2030_hand.jpg", + "images/2025-01-14-11-53-50_2033_hand.jpg", + "images/2025-01-14-11-53-50_2036_hand.jpg", + "images/2025-01-14-11-53-50_2039_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Pick large gear', what low-level action should happen next?\nOptions:\nA. Lift\nB. Nudge\nC. Twist\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Lift" + } + ], + "images": [ + "images/2025-01-14-11-53-50_2283_hand.jpg", + "images/2025-01-14-11-53-50_2286_hand.jpg", + "images/2025-01-14-11-53-50_2289_hand.jpg", + "images/2025-01-14-11-53-50_2292_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert large gear', what low-level action should happen next?\nOptions:\nA. Nudge\nB. Push\nC. Align\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Align" + } + ], + "images": [ + "images/2025-01-14-11-53-50_2418_hand.jpg", + "images/2025-01-14-11-53-50_2421_hand.jpg", + "images/2025-01-14-11-53-50_2424_hand.jpg", + "images/2025-01-14-11-53-50_2427_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert large gear', what low-level action should happen next?\nOptions:\nA. Push\nB. Release\nC. Approach\nD. Twist\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Push" + } + ], + "images": [ + "images/2025-01-14-11-53-50_2619_hand.jpg", + "images/2025-01-14-11-53-50_2622_hand.jpg", + "images/2025-01-14-11-53-50_2625_hand.jpg", + "images/2025-01-14-11-53-50_2628_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Pick medium gear', what low-level action should happen next?\nOptions:\nA. Release\nB. Approach\nC. Twist\nD. Lift\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Lift" + } + ], + "images": [ + "images/2025-01-14-11-53-50_2922_hand.jpg", + "images/2025-01-14-11-53-50_2925_hand.jpg", + "images/2025-01-14-11-53-50_2928_hand.jpg", + "images/2025-01-14-11-53-50_2931_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert medium gear', what low-level action should happen next?\nOptions:\nA. Grasp\nB. Nudge\nC. Lift\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Align" + } + ], + "images": [ + "images/2025-01-14-11-53-50_3135_hand.jpg", + "images/2025-01-14-11-53-50_3138_hand.jpg", + "images/2025-01-14-11-53-50_3141_hand.jpg", + "images/2025-01-14-11-53-50_3144_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert medium gear', what low-level action should happen next?\nOptions:\nA. Nudge\nB. Grasp\nC. Twist\nD. Push\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Push" + } + ], + "images": [ + "images/2025-01-14-11-53-50_3425_hand.jpg", + "images/2025-01-14-11-53-50_3428_hand.jpg", + "images/2025-01-14-11-53-50_3431_hand.jpg", + "images/2025-01-14-11-53-50_3434_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert medium gear', what low-level action should happen next?\nOptions:\nA. Grasp\nB. Pull\nC. Release\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Grasp" + } + ], + "images": [ + "images/2025-01-14-11-53-50_3608_hand.jpg", + "images/2025-01-14-11-53-50_3611_hand.jpg", + "images/2025-01-14-11-53-50_3614_hand.jpg", + "images/2025-01-14-11-53-50_3617_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert medium gear', what low-level action should happen next?\nOptions:\nA. Push\nB. Align\nC. Twist\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Align" + } + ], + "images": [ + "images/2025-01-14-11-53-50_3696_hand.jpg", + "images/2025-01-14-11-53-50_3699_hand.jpg", + "images/2025-01-14-11-53-50_3702_hand.jpg", + "images/2025-01-14-11-53-50_3705_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert medium gear', what low-level action should happen next?\nOptions:\nA. Pull\nB. Lift\nC. Release\nD. Push\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Push" + } + ], + "images": [ + "images/2025-01-14-11-53-50_3819_hand.jpg", + "images/2025-01-14-11-53-50_3822_hand.jpg", + "images/2025-01-14-11-53-50_3825_hand.jpg", + "images/2025-01-14-11-53-50_3828_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Pick BNC', what low-level action should happen next?\nOptions:\nA. Lift\nB. Pull\nC. Twist\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Lift" + } + ], + "images": [ + "images/2025-01-14-11-53-50_4080_hand.jpg", + "images/2025-01-14-11-53-50_4083_hand.jpg", + "images/2025-01-14-11-53-50_4086_hand.jpg", + "images/2025-01-14-11-53-50_4089_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert BNC', what low-level action should happen next?\nOptions:\nA. Nudge\nB. Grasp\nC. Align\nD. Push\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Align" + } + ], + "images": [ + "images/2025-01-14-11-53-50_4297_hand.jpg", + "images/2025-01-14-11-53-50_4300_hand.jpg", + "images/2025-01-14-11-53-50_4303_hand.jpg", + "images/2025-01-14-11-53-50_4306_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert BNC', what low-level action should happen next?\nOptions:\nA. Nudge\nB. Approach\nC. Twist\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Twist" + } + ], + "images": [ + "images/2025-01-14-11-53-50_4556_hand.jpg", + "images/2025-01-14-11-53-50_4559_hand.jpg", + "images/2025-01-14-11-53-50_4562_hand.jpg", + "images/2025-01-14-11-53-50_4565_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert BNC', what low-level action should happen next?\nOptions:\nA. Push\nB. Lift\nC. Nudge\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Release" + } + ], + "images": [ + "images/2025-01-14-11-53-50_4761_hand.jpg", + "images/2025-01-14-11-53-50_4764_hand.jpg", + "images/2025-01-14-11-53-50_4767_hand.jpg", + "images/2025-01-14-11-53-50_4770_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Pick round peg 1', what low-level action should happen next?\nOptions:\nA. Release\nB. Lift\nC. Pull\nD. Twist\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Lift" + } + ], + "images": [ + "images/2025-01-14-11-53-50_5125_hand.jpg", + "images/2025-01-14-11-53-50_5128_hand.jpg", + "images/2025-01-14-11-53-50_5131_hand.jpg", + "images/2025-01-14-11-53-50_5134_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert round peg 1', what low-level action should happen next?\nOptions:\nA. Twist\nB. Align\nC. Release\nD. Grasp\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Align" + } + ], + "images": [ + "images/2025-01-14-11-53-50_5280_hand.jpg", + "images/2025-01-14-11-53-50_5283_hand.jpg", + "images/2025-01-14-11-53-50_5286_hand.jpg", + "images/2025-01-14-11-53-50_5289_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert round peg 1', what low-level action should happen next?\nOptions:\nA. Nudge\nB. Align\nC. Grasp\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Align" + } + ], + "images": [ + "images/2025-01-14-11-53-50_5757_hand.jpg", + "images/2025-01-14-11-53-50_5760_hand.jpg", + "images/2025-01-14-11-53-50_5763_hand.jpg", + "images/2025-01-14-11-53-50_5766_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert round peg 1', what low-level action should happen next?\nOptions:\nA. Twist\nB. Release\nC. Approach\nD. Nudge\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Release" + } + ], + "images": [ + "images/2025-01-14-11-53-50_5866_hand.jpg", + "images/2025-01-14-11-53-50_5869_hand.jpg", + "images/2025-01-14-11-53-50_5872_hand.jpg", + "images/2025-01-14-11-53-50_5875_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Pick round peg 2', what low-level action should happen next?\nOptions:\nA. Lift\nB. Approach\nC. Pull\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Lift" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6154_hand.jpg", + "images/2025-01-14-11-53-50_6157_hand.jpg", + "images/2025-01-14-11-53-50_6160_hand.jpg", + "images/2025-01-14-11-53-50_6163_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert round peg 2', what low-level action should happen next?\nOptions:\nA. Align\nB. Lift\nC. Push\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Align" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6345_hand.jpg", + "images/2025-01-14-11-53-50_6348_hand.jpg", + "images/2025-01-14-11-53-50_6351_hand.jpg", + "images/2025-01-14-11-53-50_6354_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert round peg 2', what low-level action should happen next?\nOptions:\nA. Twist\nB. Release\nC. Pull\nD. Nudge\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Release" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6439_hand.jpg", + "images/2025-01-14-11-53-50_6442_hand.jpg", + "images/2025-01-14-11-53-50_6445_hand.jpg", + "images/2025-01-14-11-53-50_6448_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Pick round peg 3', what low-level action should happen next?\nOptions:\nA. Approach\nB. Twist\nC. Nudge\nD. Lift\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Lift" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6596_hand.jpg", + "images/2025-01-14-11-53-50_6599_hand.jpg", + "images/2025-01-14-11-53-50_6602_hand.jpg", + "images/2025-01-14-11-53-50_6605_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert round peg 3', what low-level action should happen next?\nOptions:\nA. Push\nB. Pull\nC. Align\nD. Nudge\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Align" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6821_hand.jpg", + "images/2025-01-14-11-53-50_6824_hand.jpg", + "images/2025-01-14-11-53-50_6827_hand.jpg", + "images/2025-01-14-11-53-50_6830_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert round peg 3', what low-level action should happen next?\nOptions:\nA. Lift\nB. Release\nC. Nudge\nD. Twist\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Release" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6961_hand.jpg", + "images/2025-01-14-11-53-50_6964_hand.jpg", + "images/2025-01-14-11-53-50_6967_hand.jpg", + "images/2025-01-14-11-53-50_6970_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Pick D-SUB', what low-level action should happen next?\nOptions:\nA. Lift\nB. Twist\nC. Release\nD. Push\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Lift" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7137_hand.jpg", + "images/2025-01-14-11-53-50_7140_hand.jpg", + "images/2025-01-14-11-53-50_7143_hand.jpg", + "images/2025-01-14-11-53-50_7146_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Place D-SUB', what low-level action should happen next?\nOptions:\nA. Pull\nB. Nudge\nC. Release\nD. Push\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Release" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7290_hand.jpg", + "images/2025-01-14-11-53-50_7293_hand.jpg", + "images/2025-01-14-11-53-50_7296_hand.jpg", + "images/2025-01-14-11-53-50_7299_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Pick D-SUB', what low-level action should happen next?\nOptions:\nA. Pull\nB. Push\nC. Approach\nD. Lift\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Lift" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7426_hand.jpg", + "images/2025-01-14-11-53-50_7429_hand.jpg", + "images/2025-01-14-11-53-50_7432_hand.jpg", + "images/2025-01-14-11-53-50_7435_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Place D-SUB', what low-level action should happen next?\nOptions:\nA. Release\nB. Nudge\nC. Lift\nD. Pull\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Release" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7550_hand.jpg", + "images/2025-01-14-11-53-50_7553_hand.jpg", + "images/2025-01-14-11-53-50_7556_hand.jpg", + "images/2025-01-14-11-53-50_7559_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Pick D-SUB', what low-level action should happen next?\nOptions:\nA. Pull\nB. Release\nC. Twist\nD. Lift\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Lift" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7674_hand.jpg", + "images/2025-01-14-11-53-50_7677_hand.jpg", + "images/2025-01-14-11-53-50_7680_hand.jpg", + "images/2025-01-14-11-53-50_7683_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert D-SUB', what low-level action should happen next?\nOptions:\nA. Align\nB. Push\nC. Twist\nD. Lift\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Align" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7842_hand.jpg", + "images/2025-01-14-11-53-50_7845_hand.jpg", + "images/2025-01-14-11-53-50_7848_hand.jpg", + "images/2025-01-14-11-53-50_7851_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert D-SUB', what low-level action should happen next?\nOptions:\nA. Release\nB. Push\nC. Twist\nD. Nudge\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Push" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7936_hand.jpg", + "images/2025-01-14-11-53-50_7939_hand.jpg", + "images/2025-01-14-11-53-50_7942_hand.jpg", + "images/2025-01-14-11-53-50_7945_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert D-SUB', what low-level action should happen next?\nOptions:\nA. Lift\nB. Release\nC. Nudge\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Release" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7978_hand.jpg", + "images/2025-01-14-11-53-50_7981_hand.jpg", + "images/2025-01-14-11-53-50_7984_hand.jpg", + "images/2025-01-14-11-53-50_7987_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Pick waterproof', what low-level action should happen next?\nOptions:\nA. Release\nB. Pull\nC. Lift\nD. Nudge\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Lift" + } + ], + "images": [ + "images/2025-01-14-11-53-50_8103_hand.jpg", + "images/2025-01-14-11-53-50_8106_hand.jpg", + "images/2025-01-14-11-53-50_8109_hand.jpg", + "images/2025-01-14-11-53-50_8112_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Place waterproof', what low-level action should happen next?\nOptions:\nA. Lift\nB. Release\nC. Twist\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Release" + } + ], + "images": [ + "images/2025-01-14-11-53-50_8247_hand.jpg", + "images/2025-01-14-11-53-50_8250_hand.jpg", + "images/2025-01-14-11-53-50_8253_hand.jpg", + "images/2025-01-14-11-53-50_8256_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Pick waterproof', what low-level action should happen next?\nOptions:\nA. Push\nB. Pull\nC. Lift\nD. Nudge\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Lift" + } + ], + "images": [ + "images/2025-01-14-11-53-50_8406_hand.jpg", + "images/2025-01-14-11-53-50_8409_hand.jpg", + "images/2025-01-14-11-53-50_8412_hand.jpg", + "images/2025-01-14-11-53-50_8415_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert waterproof', what low-level action should happen next?\nOptions:\nA. Pull\nB. Twist\nC. Lift\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Align" + } + ], + "images": [ + "images/2025-01-14-11-53-50_8621_hand.jpg", + "images/2025-01-14-11-53-50_8624_hand.jpg", + "images/2025-01-14-11-53-50_8627_hand.jpg", + "images/2025-01-14-11-53-50_8630_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert waterproof', what low-level action should happen next?\nOptions:\nA. Grasp\nB. Push\nC. Pull\nD. Approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Push" + } + ], + "images": [ + "images/2025-01-14-11-53-50_8903_hand.jpg", + "images/2025-01-14-11-53-50_8906_hand.jpg", + "images/2025-01-14-11-53-50_8909_hand.jpg", + "images/2025-01-14-11-53-50_8912_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Pick bolt 4', what low-level action should happen next?\nOptions:\nA. Lift\nB. Align\nC. Nudge\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Lift" + } + ], + "images": [ + "images/2025-01-14-11-53-50_9372_hand.jpg", + "images/2025-01-14-11-53-50_9375_hand.jpg", + "images/2025-01-14-11-53-50_9378_hand.jpg", + "images/2025-01-14-11-53-50_9381_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Place bolt 4', what low-level action should happen next?\nOptions:\nA. Grasp\nB. Lift\nC. Release\nD. Push\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Release" + } + ], + "images": [ + "images/2025-01-14-11-53-50_9565_hand.jpg", + "images/2025-01-14-11-53-50_9568_hand.jpg", + "images/2025-01-14-11-53-50_9571_hand.jpg", + "images/2025-01-14-11-53-50_9574_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Pick bolt 4', what low-level action should happen next?\nOptions:\nA. Nudge\nB. Align\nC. Lift\nD. Pull\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Lift" + } + ], + "images": [ + "images/2025-01-14-11-53-50_9733_hand.jpg", + "images/2025-01-14-11-53-50_9736_hand.jpg", + "images/2025-01-14-11-53-50_9739_hand.jpg", + "images/2025-01-14-11-53-50_9742_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert bolt 4', what low-level action should happen next?\nOptions:\nA. Grasp\nB. Release\nC. Align\nD. Push\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Align" + } + ], + "images": [ + "images/2025-01-14-11-53-50_10014_hand.jpg", + "images/2025-01-14-11-53-50_10017_hand.jpg", + "images/2025-01-14-11-53-50_10020_hand.jpg", + "images/2025-01-14-11-53-50_10023_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Place bolt 4', what low-level action should happen next?\nOptions:\nA. Twist\nB. Align\nC. Release\nD. Lift\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Release" + } + ], + "images": [ + "images/2025-01-14-11-53-50_10326_hand.jpg", + "images/2025-01-14-11-53-50_10329_hand.jpg", + "images/2025-01-14-11-53-50_10332_hand.jpg", + "images/2025-01-14-11-53-50_10335_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Pick square peg 4', what low-level action should happen next?\nOptions:\nA. Approach\nB. Nudge\nC. Release\nD. Lift\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Lift" + } + ], + "images": [ + "images/2025-01-14-11-53-50_10529_hand.jpg", + "images/2025-01-14-11-53-50_10532_hand.jpg", + "images/2025-01-14-11-53-50_10535_hand.jpg", + "images/2025-01-14-11-53-50_10538_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert square peg 4', what low-level action should happen next?\nOptions:\nA. Align\nB. Twist\nC. Pull\nD. Lift\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Align" + } + ], + "images": [ + "images/2025-01-14-11-53-50_10714_hand.jpg", + "images/2025-01-14-11-53-50_10717_hand.jpg", + "images/2025-01-14-11-53-50_10720_hand.jpg", + "images/2025-01-14-11-53-50_10723_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert square peg 4', what low-level action should happen next?\nOptions:\nA. Lift\nB. Approach\nC. Release\nD. Pull\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Release" + } + ], + "images": [ + "images/2025-01-14-11-53-50_10776_hand.jpg", + "images/2025-01-14-11-53-50_10779_hand.jpg", + "images/2025-01-14-11-53-50_10782_hand.jpg", + "images/2025-01-14-11-53-50_10785_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Pick square peg 3', what low-level action should happen next?\nOptions:\nA. Nudge\nB. Twist\nC. Align\nD. Lift\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Lift" + } + ], + "images": [ + "images/2025-01-14-11-53-50_10942_hand.jpg", + "images/2025-01-14-11-53-50_10945_hand.jpg", + "images/2025-01-14-11-53-50_10948_hand.jpg", + "images/2025-01-14-11-53-50_10951_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert square peg 2', what low-level action should happen next?\nOptions:\nA. Align\nB. Twist\nC. Grasp\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Align" + } + ], + "images": [ + "images/2025-01-14-11-53-50_11210_hand.jpg", + "images/2025-01-14-11-53-50_11213_hand.jpg", + "images/2025-01-14-11-53-50_11216_hand.jpg", + "images/2025-01-14-11-53-50_11219_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert square peg 3', what low-level action should happen next?\nOptions:\nA. Nudge\nB. Twist\nC. Align\nD. Lift\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Align" + } + ], + "images": [ + "images/2025-01-14-11-53-50_11488_hand.jpg", + "images/2025-01-14-11-53-50_11491_hand.jpg", + "images/2025-01-14-11-53-50_11494_hand.jpg", + "images/2025-01-14-11-53-50_11497_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert square peg 3', what low-level action should happen next?\nOptions:\nA. Lift\nB. Release\nC. Twist\nD. Nudge\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Release" + } + ], + "images": [ + "images/2025-01-14-11-53-50_11562_hand.jpg", + "images/2025-01-14-11-53-50_11565_hand.jpg", + "images/2025-01-14-11-53-50_11568_hand.jpg", + "images/2025-01-14-11-53-50_11571_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Pick square peg 2', what low-level action should happen next?\nOptions:\nA. Lift\nB. Pull\nC. Approach\nD. Push\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Lift" + } + ], + "images": [ + "images/2025-01-14-11-53-50_11703_hand.jpg", + "images/2025-01-14-11-53-50_11706_hand.jpg", + "images/2025-01-14-11-53-50_11709_hand.jpg", + "images/2025-01-14-11-53-50_11712_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert square peg 2', what low-level action should happen next?\nOptions:\nA. Align\nB. Pull\nC. Twist\nD. Lift\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Align" + } + ], + "images": [ + "images/2025-01-14-11-53-50_11893_hand.jpg", + "images/2025-01-14-11-53-50_11896_hand.jpg", + "images/2025-01-14-11-53-50_11899_hand.jpg", + "images/2025-01-14-11-53-50_11902_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert square peg 2', what low-level action should happen next?\nOptions:\nA. Pull\nB. Push\nC. Nudge\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Release" + } + ], + "images": [ + "images/2025-01-14-11-53-50_11984_hand.jpg", + "images/2025-01-14-11-53-50_11987_hand.jpg", + "images/2025-01-14-11-53-50_11990_hand.jpg", + "images/2025-01-14-11-53-50_11993_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Pick square peg 1', what low-level action should happen next?\nOptions:\nA. Pull\nB. Release\nC. Lift\nD. Nudge\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Lift" + } + ], + "images": [ + "images/2025-01-14-11-53-50_12118_hand.jpg", + "images/2025-01-14-11-53-50_12121_hand.jpg", + "images/2025-01-14-11-53-50_12124_hand.jpg", + "images/2025-01-14-11-53-50_12127_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert square peg 1', what low-level action should happen next?\nOptions:\nA. Nudge\nB. Push\nC. Pull\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Align" + } + ], + "images": [ + "images/2025-01-14-11-53-50_12362_hand.jpg", + "images/2025-01-14-11-53-50_12365_hand.jpg", + "images/2025-01-14-11-53-50_12368_hand.jpg", + "images/2025-01-14-11-53-50_12371_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Place square peg 1', what low-level action should happen next?\nOptions:\nA. Release\nB. Push\nC. Grasp\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Align" + } + ], + "images": [ + "images/2025-01-14-11-53-50_13403_hand.jpg", + "images/2025-01-14-11-53-50_13406_hand.jpg", + "images/2025-01-14-11-53-50_13409_hand.jpg", + "images/2025-01-14-11-53-50_13412_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Place square peg 1', what low-level action should happen next?\nOptions:\nA. Pull\nB. Twist\nC. Lift\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Release" + } + ], + "images": [ + "images/2025-01-14-11-53-50_13595_hand.jpg", + "images/2025-01-14-11-53-50_13598_hand.jpg", + "images/2025-01-14-11-53-50_13601_hand.jpg", + "images/2025-01-14-11-53-50_13604_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Pick round peg 4', what low-level action should happen next?\nOptions:\nA. Pull\nB. Nudge\nC. Approach\nD. Lift\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Lift" + } + ], + "images": [ + "images/2025-01-14-11-53-50_13797_hand.jpg", + "images/2025-01-14-11-53-50_13800_hand.jpg", + "images/2025-01-14-11-53-50_13803_hand.jpg", + "images/2025-01-14-11-53-50_13806_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert round peg 4', what low-level action should happen next?\nOptions:\nA. Push\nB. Lift\nC. Align\nD. Grasp\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Align" + } + ], + "images": [ + "images/2025-01-14-11-53-50_14016_hand.jpg", + "images/2025-01-14-11-53-50_14019_hand.jpg", + "images/2025-01-14-11-53-50_14022_hand.jpg", + "images/2025-01-14-11-53-50_14025_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert round peg 4', what low-level action should happen next?\nOptions:\nA. Twist\nB. Pull\nC. Release\nD. Grasp\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Release" + } + ], + "images": [ + "images/2025-01-14-11-53-50_14157_hand.jpg", + "images/2025-01-14-11-53-50_14160_hand.jpg", + "images/2025-01-14-11-53-50_14163_hand.jpg", + "images/2025-01-14-11-53-50_14166_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Pick bolt 4', what low-level action should happen next?\nOptions:\nA. Nudge\nB. Lift\nC. Align\nD. Twist\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Lift" + } + ], + "images": [ + "images/2025-01-14-11-53-50_14341_hand.jpg", + "images/2025-01-14-11-53-50_14344_hand.jpg", + "images/2025-01-14-11-53-50_14347_hand.jpg", + "images/2025-01-14-11-53-50_14350_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert bolt 4', what low-level action should happen next?\nOptions:\nA. Align\nB. Pull\nC. Release\nD. Nudge\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Align" + } + ], + "images": [ + "images/2025-01-14-11-53-50_14523_hand.jpg", + "images/2025-01-14-11-53-50_14526_hand.jpg", + "images/2025-01-14-11-53-50_14529_hand.jpg", + "images/2025-01-14-11-53-50_14532_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert bolt 4', what low-level action should happen next?\nOptions:\nA. Release\nB. Twist\nC. Approach\nD. Nudge\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Twist" + } + ], + "images": [ + "images/2025-01-14-11-53-50_14624_hand.jpg", + "images/2025-01-14-11-53-50_14627_hand.jpg", + "images/2025-01-14-11-53-50_14630_hand.jpg", + "images/2025-01-14-11-53-50_14633_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the ongoing high-level action 'Insert bolt 4', what low-level action should happen next?\nOptions:\nA. Release\nB. Approach\nC. Grasp\nD. Pull\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Release" + } + ], + "images": [ + "images/2025-01-14-11-53-50_14733_hand.jpg", + "images/2025-01-14-11-53-50_14736_hand.jpg", + "images/2025-01-14-11-53-50_14739_hand.jpg", + "images/2025-01-14-11-53-50_14742_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T11", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the high-level action 'Insert USB', which action best fills [MASK] in this local chain: Approach > [MASK] > Release?\nOptions:\nA. Align\nB. Nudge\nC. Release\nD. Pull\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Align" + } + ], + "images": [ + "images/2025-01-14-11-53-50_632_hand.jpg", + "images/2025-01-14-11-53-50_635_hand.jpg", + "images/2025-01-14-11-53-50_638_hand.jpg", + "images/2025-01-14-11-53-50_641_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T12", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the high-level action 'Insert Ethernet', which action best fills [MASK] in this local chain: Approach > [MASK] > Push?\nOptions:\nA. Approach\nB. Grasp\nC. Release\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Align" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1193_hand.jpg", + "images/2025-01-14-11-53-50_1196_hand.jpg", + "images/2025-01-14-11-53-50_1199_hand.jpg", + "images/2025-01-14-11-53-50_1202_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T12", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the high-level action 'Insert Ethernet', which action best fills [MASK] in this local chain: Align > [MASK] > Release?\nOptions:\nA. Twist\nB. Push\nC. Release\nD. Approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Push" + } + ], + "images": [ + "images/2025-01-14-11-53-50_1274_hand.jpg", + "images/2025-01-14-11-53-50_1277_hand.jpg", + "images/2025-01-14-11-53-50_1280_hand.jpg", + "images/2025-01-14-11-53-50_1283_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T12", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the high-level action 'Insert small gear', which action best fills [MASK] in this local chain: Approach > [MASK] > Push?\nOptions:\nA. Nudge\nB. Align\nC. Grasp\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Align" + } + ], + "images": [ + "images/2025-01-14-11-53-50_2020_hand.jpg", + "images/2025-01-14-11-53-50_2023_hand.jpg", + "images/2025-01-14-11-53-50_2026_hand.jpg", + "images/2025-01-14-11-53-50_2029_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T12", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the high-level action 'Insert large gear', which action best fills [MASK] in this local chain: Approach > [MASK] > Push?\nOptions:\nA. Release\nB. Lift\nC. Align\nD. Twist\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Align" + } + ], + "images": [ + "images/2025-01-14-11-53-50_2576_hand.jpg", + "images/2025-01-14-11-53-50_2579_hand.jpg", + "images/2025-01-14-11-53-50_2582_hand.jpg", + "images/2025-01-14-11-53-50_2585_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T12", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the high-level action 'Insert medium gear', which action best fills [MASK] in this local chain: Approach > [MASK] > Push?\nOptions:\nA. Grasp\nB. Release\nC. Align\nD. Pull\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Align" + } + ], + "images": [ + "images/2025-01-14-11-53-50_3366_hand.jpg", + "images/2025-01-14-11-53-50_3369_hand.jpg", + "images/2025-01-14-11-53-50_3372_hand.jpg", + "images/2025-01-14-11-53-50_3375_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T12", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the high-level action 'Insert medium gear', which action best fills [MASK] in this local chain: Align > [MASK] > Grasp?\nOptions:\nA. Lift\nB. Approach\nC. Push\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Push" + } + ], + "images": [ + "images/2025-01-14-11-53-50_3589_hand.jpg", + "images/2025-01-14-11-53-50_3592_hand.jpg", + "images/2025-01-14-11-53-50_3595_hand.jpg", + "images/2025-01-14-11-53-50_3598_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T12", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the high-level action 'Insert medium gear', which action best fills [MASK] in this local chain: Push > [MASK] > Align?\nOptions:\nA. Lift\nB. Grasp\nC. Twist\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Grasp" + } + ], + "images": [ + "images/2025-01-14-11-53-50_3682_hand.jpg", + "images/2025-01-14-11-53-50_3685_hand.jpg", + "images/2025-01-14-11-53-50_3688_hand.jpg", + "images/2025-01-14-11-53-50_3691_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T12", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the high-level action 'Insert medium gear', which action best fills [MASK] in this local chain: Grasp > [MASK] > Push?\nOptions:\nA. Align\nB. Pull\nC. Twist\nD. Push\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Align" + } + ], + "images": [ + "images/2025-01-14-11-53-50_3794_hand.jpg", + "images/2025-01-14-11-53-50_3797_hand.jpg", + "images/2025-01-14-11-53-50_3800_hand.jpg", + "images/2025-01-14-11-53-50_3803_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T12", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the high-level action 'Insert BNC', which action best fills [MASK] in this local chain: Approach > [MASK] > Twist?\nOptions:\nA. Align\nB. Twist\nC. Pull\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Align" + } + ], + "images": [ + "images/2025-01-14-11-53-50_4506_hand.jpg", + "images/2025-01-14-11-53-50_4509_hand.jpg", + "images/2025-01-14-11-53-50_4512_hand.jpg", + "images/2025-01-14-11-53-50_4515_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T12", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the high-level action 'Insert BNC', which action best fills [MASK] in this local chain: Align > [MASK] > Release?\nOptions:\nA. Approach\nB. Push\nC. Twist\nD. Nudge\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Twist" + } + ], + "images": [ + "images/2025-01-14-11-53-50_4732_hand.jpg", + "images/2025-01-14-11-53-50_4735_hand.jpg", + "images/2025-01-14-11-53-50_4738_hand.jpg", + "images/2025-01-14-11-53-50_4741_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T12", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the high-level action 'Insert round peg 1', which action best fills [MASK] in this local chain: Approach > [MASK] > Release?\nOptions:\nA. Pull\nB. Align\nC. Twist\nD. Grasp\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Align" + } + ], + "images": [ + "images/2025-01-14-11-53-50_5851_hand.jpg", + "images/2025-01-14-11-53-50_5854_hand.jpg", + "images/2025-01-14-11-53-50_5857_hand.jpg", + "images/2025-01-14-11-53-50_5860_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T12", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the high-level action 'Insert round peg 2', which action best fills [MASK] in this local chain: Approach > [MASK] > Release?\nOptions:\nA. Approach\nB. Pull\nC. Nudge\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Align" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6430_hand.jpg", + "images/2025-01-14-11-53-50_6433_hand.jpg", + "images/2025-01-14-11-53-50_6436_hand.jpg", + "images/2025-01-14-11-53-50_6439_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T12", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the high-level action 'Insert round peg 3', which action best fills [MASK] in this local chain: Approach > [MASK] > Release?\nOptions:\nA. Release\nB. Align\nC. Pull\nD. Approach\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Align" + } + ], + "images": [ + "images/2025-01-14-11-53-50_6942_hand.jpg", + "images/2025-01-14-11-53-50_6945_hand.jpg", + "images/2025-01-14-11-53-50_6948_hand.jpg", + "images/2025-01-14-11-53-50_6951_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T12", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the high-level action 'Insert D-SUB', which action best fills [MASK] in this local chain: Approach > [MASK] > Push?\nOptions:\nA. Grasp\nB. Twist\nC. Pull\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Align" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7926_hand.jpg", + "images/2025-01-14-11-53-50_7929_hand.jpg", + "images/2025-01-14-11-53-50_7932_hand.jpg", + "images/2025-01-14-11-53-50_7935_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T12", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the high-level action 'Insert D-SUB', which action best fills [MASK] in this local chain: Align > [MASK] > Release?\nOptions:\nA. Approach\nB. Nudge\nC. Grasp\nD. Push\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Push" + } + ], + "images": [ + "images/2025-01-14-11-53-50_7972_hand.jpg", + "images/2025-01-14-11-53-50_7975_hand.jpg", + "images/2025-01-14-11-53-50_7978_hand.jpg", + "images/2025-01-14-11-53-50_7981_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T12", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the high-level action 'Insert waterproof', which action best fills [MASK] in this local chain: Approach > [MASK] > Push?\nOptions:\nA. Approach\nB. Grasp\nC. Align\nD. Release\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Align" + } + ], + "images": [ + "images/2025-01-14-11-53-50_8850_hand.jpg", + "images/2025-01-14-11-53-50_8853_hand.jpg", + "images/2025-01-14-11-53-50_8856_hand.jpg", + "images/2025-01-14-11-53-50_8859_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T12", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the high-level action 'Insert square peg 4', which action best fills [MASK] in this local chain: Approach > [MASK] > Release?\nOptions:\nA. Release\nB. Lift\nC. Push\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Align" + } + ], + "images": [ + "images/2025-01-14-11-53-50_10771_hand.jpg", + "images/2025-01-14-11-53-50_10774_hand.jpg", + "images/2025-01-14-11-53-50_10777_hand.jpg", + "images/2025-01-14-11-53-50_10780_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T12", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the high-level action 'Insert square peg 3', which action best fills [MASK] in this local chain: Approach > [MASK] > Release?\nOptions:\nA. Grasp\nB. Nudge\nC. Pull\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Align" + } + ], + "images": [ + "images/2025-01-14-11-53-50_11557_hand.jpg", + "images/2025-01-14-11-53-50_11560_hand.jpg", + "images/2025-01-14-11-53-50_11563_hand.jpg", + "images/2025-01-14-11-53-50_11566_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T12", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the high-level action 'Insert square peg 2', which action best fills [MASK] in this local chain: Approach > [MASK] > Release?\nOptions:\nA. Twist\nB. Lift\nC. Pull\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Align" + } + ], + "images": [ + "images/2025-01-14-11-53-50_11975_hand.jpg", + "images/2025-01-14-11-53-50_11978_hand.jpg", + "images/2025-01-14-11-53-50_11981_hand.jpg", + "images/2025-01-14-11-53-50_11984_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T12", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the high-level action 'Place square peg 1', which action best fills [MASK] in this local chain: Approach > [MASK] > Release?\nOptions:\nA. Twist\nB. Approach\nC. Release\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Align" + } + ], + "images": [ + "images/2025-01-14-11-53-50_13564_hand.jpg", + "images/2025-01-14-11-53-50_13567_hand.jpg", + "images/2025-01-14-11-53-50_13570_hand.jpg", + "images/2025-01-14-11-53-50_13573_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T12", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the high-level action 'Insert round peg 4', which action best fills [MASK] in this local chain: Approach > [MASK] > Release?\nOptions:\nA. Twist\nB. Align\nC. Approach\nD. Pull\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Align" + } + ], + "images": [ + "images/2025-01-14-11-53-50_14138_hand.jpg", + "images/2025-01-14-11-53-50_14141_hand.jpg", + "images/2025-01-14-11-53-50_14144_hand.jpg", + "images/2025-01-14-11-53-50_14147_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T12", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the high-level action 'Insert bolt 4', which action best fills [MASK] in this local chain: Approach > [MASK] > Twist?\nOptions:\nA. Pull\nB. Nudge\nC. Lift\nD. Align\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Align" + } + ], + "images": [ + "images/2025-01-14-11-53-50_14610_hand.jpg", + "images/2025-01-14-11-53-50_14613_hand.jpg", + "images/2025-01-14-11-53-50_14616_hand.jpg", + "images/2025-01-14-11-53-50_14619_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T12", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nWithin the high-level action 'Insert bolt 4', which action best fills [MASK] in this local chain: Align > [MASK] > Release?\nOptions:\nA. Release\nB. Push\nC. Twist\nD. Nudge\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Twist" + } + ], + "images": [ + "images/2025-01-14-11-53-50_14712_hand.jpg", + "images/2025-01-14-11-53-50_14715_hand.jpg", + "images/2025-01-14-11-53-50_14718_hand.jpg", + "images/2025-01-14-11-53-50_14721_hand.jpg" + ], + "dataset": "REASSEMBLE", + "source_dataset": "REASSEMBLE", + "task_type": "T12", + "task_id": "2025-01-14-11-53-50", + "split": "eval", + "split_group_id": "2025-01-14-11-53-50", + "recording_id": "2025-01-14-11-53-50", + "episode_id": null, + "camera": "hand", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Right\nC. Down\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Down" + } + ], + "images": [ + "images/task_004_episode_15_0_cam_high.jpg", + "images/task_004_episode_15_15_cam_high.jpg", + "images/task_004_episode_15_30_cam_high.jpg", + "images/task_004_episode_15_45_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_15", + "recording_id": "task_004_episode_15", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Left\nC. Up\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Left" + } + ], + "images": [ + "images/task_004_episode_15_12_cam_high.jpg", + "images/task_004_episode_15_27_cam_high.jpg", + "images/task_004_episode_15_42_cam_high.jpg", + "images/task_004_episode_15_57_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_15", + "recording_id": "task_004_episode_15", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Up\nC. Left\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Left" + } + ], + "images": [ + "images/task_004_episode_15_18_cam_high.jpg", + "images/task_004_episode_15_33_cam_high.jpg", + "images/task_004_episode_15_48_cam_high.jpg", + "images/task_004_episode_15_63_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_15", + "recording_id": "task_004_episode_15", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Right\nC. Left\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Left" + } + ], + "images": [ + "images/task_004_episode_15_24_cam_high.jpg", + "images/task_004_episode_15_39_cam_high.jpg", + "images/task_004_episode_15_54_cam_high.jpg", + "images/task_004_episode_15_69_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_15", + "recording_id": "task_004_episode_15", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Right\nC. Down\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Right" + } + ], + "images": [ + "images/task_004_episode_15_300_cam_high.jpg", + "images/task_004_episode_15_315_cam_high.jpg", + "images/task_004_episode_15_330_cam_high.jpg", + "images/task_004_episode_15_345_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_15", + "recording_id": "task_004_episode_15", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Right\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Right" + } + ], + "images": [ + "images/task_004_episode_15_306_cam_high.jpg", + "images/task_004_episode_15_321_cam_high.jpg", + "images/task_004_episode_15_336_cam_high.jpg", + "images/task_004_episode_15_351_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_15", + "recording_id": "task_004_episode_15", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Left\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Right" + } + ], + "images": [ + "images/task_004_episode_15_312_cam_high.jpg", + "images/task_004_episode_15_327_cam_high.jpg", + "images/task_004_episode_15_342_cam_high.jpg", + "images/task_004_episode_15_357_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_15", + "recording_id": "task_004_episode_15", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Down\nC. Up\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Right" + } + ], + "images": [ + "images/task_004_episode_15_354_cam_high.jpg", + "images/task_004_episode_15_369_cam_high.jpg", + "images/task_004_episode_15_384_cam_high.jpg", + "images/task_004_episode_15_399_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_15", + "recording_id": "task_004_episode_15", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Left\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Right" + } + ], + "images": [ + "images/task_004_episode_15_360_cam_high.jpg", + "images/task_004_episode_15_375_cam_high.jpg", + "images/task_004_episode_15_390_cam_high.jpg", + "images/task_004_episode_15_405_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_15", + "recording_id": "task_004_episode_15", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Right\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Right" + } + ], + "images": [ + "images/task_004_episode_15_366_cam_high.jpg", + "images/task_004_episode_15_381_cam_high.jpg", + "images/task_004_episode_15_396_cam_high.jpg", + "images/task_004_episode_15_411_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_15", + "recording_id": "task_004_episode_15", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Left\nC. Down\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Down" + } + ], + "images": [ + "images/task_004_episode_15_420_cam_high.jpg", + "images/task_004_episode_15_435_cam_high.jpg", + "images/task_004_episode_15_450_cam_high.jpg", + "images/task_004_episode_15_465_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_15", + "recording_id": "task_004_episode_15", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Up\nC. Down\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Up" + } + ], + "images": [ + "images/task_004_episode_15_426_cam_high.jpg", + "images/task_004_episode_15_441_cam_high.jpg", + "images/task_004_episode_15_456_cam_high.jpg", + "images/task_004_episode_15_471_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_15", + "recording_id": "task_004_episode_15", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Left\nC. Right\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Down" + } + ], + "images": [ + "images/task_004_episode_15_426_cam_high.jpg", + "images/task_004_episode_15_441_cam_high.jpg", + "images/task_004_episode_15_456_cam_high.jpg", + "images/task_004_episode_15_471_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_15", + "recording_id": "task_004_episode_15", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Left\nC. Right\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Up" + } + ], + "images": [ + "images/task_004_episode_15_432_cam_high.jpg", + "images/task_004_episode_15_447_cam_high.jpg", + "images/task_004_episode_15_462_cam_high.jpg", + "images/task_004_episode_15_477_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_15", + "recording_id": "task_004_episode_15", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Right\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Up" + } + ], + "images": [ + "images/task_004_episode_15_438_cam_high.jpg", + "images/task_004_episode_15_453_cam_high.jpg", + "images/task_004_episode_15_468_cam_high.jpg", + "images/task_004_episode_15_483_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_15", + "recording_id": "task_004_episode_15", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Down\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Down" + } + ], + "images": [ + "images/task_004_episode_15_786_cam_high.jpg", + "images/task_004_episode_15_801_cam_high.jpg", + "images/task_004_episode_15_816_cam_high.jpg", + "images/task_004_episode_15_831_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_15", + "recording_id": "task_004_episode_15", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Down\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Down" + } + ], + "images": [ + "images/task_004_episode_15_792_cam_high.jpg", + "images/task_004_episode_15_807_cam_high.jpg", + "images/task_004_episode_15_822_cam_high.jpg", + "images/task_004_episode_15_837_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_15", + "recording_id": "task_004_episode_15", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Up\nC. Down\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Down" + } + ], + "images": [ + "images/task_004_episode_15_798_cam_high.jpg", + "images/task_004_episode_15_813_cam_high.jpg", + "images/task_004_episode_15_828_cam_high.jpg", + "images/task_004_episode_15_843_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_15", + "recording_id": "task_004_episode_15", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Right\nC. Down\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Up" + } + ], + "images": [ + "images/task_004_episode_15_822_cam_high.jpg", + "images/task_004_episode_15_837_cam_high.jpg", + "images/task_004_episode_15_852_cam_high.jpg", + "images/task_004_episode_15_867_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_15", + "recording_id": "task_004_episode_15", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Down\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Up" + } + ], + "images": [ + "images/task_004_episode_15_828_cam_high.jpg", + "images/task_004_episode_15_843_cam_high.jpg", + "images/task_004_episode_15_858_cam_high.jpg", + "images/task_004_episode_15_873_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_15", + "recording_id": "task_004_episode_15", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Right\nC. Left\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Up" + } + ], + "images": [ + "images/task_004_episode_15_834_cam_high.jpg", + "images/task_004_episode_15_849_cam_high.jpg", + "images/task_004_episode_15_864_cam_high.jpg", + "images/task_004_episode_15_879_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_15", + "recording_id": "task_004_episode_15", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_004_episode_15_357_cam_high.jpg", + "images/task_004_episode_15_372_cam_high.jpg", + "images/task_004_episode_15_387_cam_high.jpg", + "images/task_004_episode_15_402_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_15", + "recording_id": "task_004_episode_15", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Both arms are actively moving" + } + ], + "images": [ + "images/task_004_episode_15_899_cam_high.jpg", + "images/task_004_episode_15_914_cam_high.jpg", + "images/task_004_episode_15_929_cam_high.jpg", + "images/task_004_episode_15_944_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_15", + "recording_id": "task_004_episode_15", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_004_episode_15_291_cam_high.jpg", + "images/task_004_episode_15_306_cam_high.jpg", + "images/task_004_episode_15_321_cam_high.jpg", + "images/task_004_episode_15_336_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_15", + "recording_id": "task_004_episode_15", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_004_episode_15_278_cam_high.jpg", + "images/task_004_episode_15_293_cam_high.jpg", + "images/task_004_episode_15_308_cam_high.jpg", + "images/task_004_episode_15_323_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_15", + "recording_id": "task_004_episode_15", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Only the left arm is actively moving" + } + ], + "images": [ + "images/task_004_episode_15_873_cam_high.jpg", + "images/task_004_episode_15_888_cam_high.jpg", + "images/task_004_episode_15_903_cam_high.jpg", + "images/task_004_episode_15_918_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_15", + "recording_id": "task_004_episode_15", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Both arms are actively moving" + } + ], + "images": [ + "images/task_004_episode_15_410_cam_high.jpg", + "images/task_004_episode_15_425_cam_high.jpg", + "images/task_004_episode_15_440_cam_high.jpg", + "images/task_004_episode_15_455_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_15", + "recording_id": "task_004_episode_15", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Only the left arm is actively moving" + } + ], + "images": [ + "images/task_004_episode_15_820_cam_high.jpg", + "images/task_004_episode_15_835_cam_high.jpg", + "images/task_004_episode_15_850_cam_high.jpg", + "images/task_004_episode_15_865_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_15", + "recording_id": "task_004_episode_15", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Both arms are actively moving" + } + ], + "images": [ + "images/task_004_episode_15_767_cam_high.jpg", + "images/task_004_episode_15_782_cam_high.jpg", + "images/task_004_episode_15_797_cam_high.jpg", + "images/task_004_episode_15_812_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_15", + "recording_id": "task_004_episode_15", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Only the right arm is actively moving" + } + ], + "images": [ + "images/task_004_episode_15_53_cam_high.jpg", + "images/task_004_episode_15_68_cam_high.jpg", + "images/task_004_episode_15_83_cam_high.jpg", + "images/task_004_episode_15_98_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_15", + "recording_id": "task_004_episode_15", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Only the left arm is actively moving" + } + ], + "images": [ + "images/task_004_episode_15_860_cam_high.jpg", + "images/task_004_episode_15_875_cam_high.jpg", + "images/task_004_episode_15_890_cam_high.jpg", + "images/task_004_episode_15_905_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_15", + "recording_id": "task_004_episode_15", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Only the right arm is actively moving" + } + ], + "images": [ + "images/task_004_episode_15_40_cam_high.jpg", + "images/task_004_episode_15_55_cam_high.jpg", + "images/task_004_episode_15_70_cam_high.jpg", + "images/task_004_episode_15_85_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_15", + "recording_id": "task_004_episode_15", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_004_episode_15_417_cam_high.jpg", + "images/task_004_episode_15_420_cam_high.jpg", + "images/task_004_episode_15_423_cam_high.jpg", + "images/task_004_episode_15_426_cam_high.jpg", + "images/task_004_episode_15_429_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_15", + "recording_id": "task_004_episode_15", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_004_episode_15_217_cam_high.jpg", + "images/task_004_episode_15_220_cam_high.jpg", + "images/task_004_episode_15_223_cam_high.jpg", + "images/task_004_episode_15_226_cam_high.jpg", + "images/task_004_episode_15_229_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_15", + "recording_id": "task_004_episode_15", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_004_episode_15_805_cam_high.jpg", + "images/task_004_episode_15_808_cam_high.jpg", + "images/task_004_episode_15_811_cam_high.jpg", + "images/task_004_episode_15_814_cam_high.jpg", + "images/task_004_episode_15_817_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_15", + "recording_id": "task_004_episode_15", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_004_episode_15_640_cam_high.jpg", + "images/task_004_episode_15_643_cam_high.jpg", + "images/task_004_episode_15_646_cam_high.jpg", + "images/task_004_episode_15_649_cam_high.jpg", + "images/task_004_episode_15_652_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_15", + "recording_id": "task_004_episode_15", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_004_episode_15_6_cam_high.jpg", + "images/task_004_episode_15_9_cam_high.jpg", + "images/task_004_episode_15_12_cam_high.jpg", + "images/task_004_episode_15_15_cam_high.jpg", + "images/task_004_episode_15_18_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_15", + "recording_id": "task_004_episode_15", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_004_episode_15_523_cam_high.jpg", + "images/task_004_episode_15_526_cam_high.jpg", + "images/task_004_episode_15_529_cam_high.jpg", + "images/task_004_episode_15_532_cam_high.jpg", + "images/task_004_episode_15_535_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_15", + "recording_id": "task_004_episode_15", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_004_episode_15_640_cam_high.jpg", + "images/task_004_episode_15_643_cam_high.jpg", + "images/task_004_episode_15_646_cam_high.jpg", + "images/task_004_episode_15_649_cam_high.jpg", + "images/task_004_episode_15_652_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_15", + "recording_id": "task_004_episode_15", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_004_episode_15_676_cam_high.jpg", + "images/task_004_episode_15_679_cam_high.jpg", + "images/task_004_episode_15_682_cam_high.jpg", + "images/task_004_episode_15_685_cam_high.jpg", + "images/task_004_episode_15_688_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_15", + "recording_id": "task_004_episode_15", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Frame X happens before Frame Y" + } + ], + "images": [ + "images/task_004_episode_15_180_cam_high.jpg", + "images/task_004_episode_15_719_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_15", + "recording_id": "task_004_episode_15", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Frame X happens before Frame Y" + } + ], + "images": [ + "images/task_004_episode_15_250_cam_high.jpg", + "images/task_004_episode_15_619_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_15", + "recording_id": "task_004_episode_15", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Frame X happens before Frame Y" + } + ], + "images": [ + "images/task_004_episode_15_350_cam_high.jpg", + "images/task_004_episode_15_819_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_15", + "recording_id": "task_004_episode_15", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_004_episode_15_632_cam_high.jpg", + "images/task_004_episode_15_737_cam_high.jpg", + "images/task_004_episode_15_685_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_15", + "recording_id": "task_004_episode_15", + "episode_id": null, + "camera": "cam_high", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_004_episode_15_209_cam_high.jpg", + "images/task_004_episode_15_262_cam_high.jpg", + "images/task_004_episode_15_314_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_15", + "recording_id": "task_004_episode_15", + "episode_id": null, + "camera": "cam_high", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_004_episode_15_156_cam_high.jpg", + "images/task_004_episode_15_103_cam_high.jpg", + "images/task_004_episode_15_50_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_15", + "recording_id": "task_004_episode_15", + "episode_id": null, + "camera": "cam_high", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_004_episode_15_156_cam_high.jpg", + "images/task_004_episode_15_209_cam_high.jpg", + "images/task_004_episode_15_262_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_15", + "recording_id": "task_004_episode_15", + "episode_id": null, + "camera": "cam_high", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_004_episode_15_579_cam_high.jpg", + "images/task_004_episode_15_632_cam_high.jpg", + "images/task_004_episode_15_685_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_15", + "recording_id": "task_004_episode_15", + "episode_id": null, + "camera": "cam_high", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_004_episode_15_843_cam_high.jpg", + "images/task_004_episode_15_896_cam_high.jpg", + "images/task_004_episode_15_790_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_15", + "recording_id": "task_004_episode_15", + "episode_id": null, + "camera": "cam_high", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_004_episode_15_367_cam_high.jpg", + "images/task_004_episode_15_314_cam_high.jpg", + "images/task_004_episode_15_420_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_15", + "recording_id": "task_004_episode_15", + "episode_id": null, + "camera": "cam_high", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_004_episode_15_262_cam_high.jpg", + "images/task_004_episode_15_367_cam_high.jpg", + "images/task_004_episode_15_314_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_15", + "recording_id": "task_004_episode_15", + "episode_id": null, + "camera": "cam_high", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_004_episode_15_790_cam_high.jpg", + "images/task_004_episode_15_843_cam_high.jpg", + "images/task_004_episode_15_737_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_15", + "recording_id": "task_004_episode_15", + "episode_id": null, + "camera": "cam_high", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_004_episode_15_526_cam_high.jpg", + "images/task_004_episode_15_473_cam_high.jpg", + "images/task_004_episode_15_420_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_15", + "recording_id": "task_004_episode_15", + "episode_id": null, + "camera": "cam_high", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_004_episode_15_473_cam_high.jpg", + "images/task_004_episode_15_367_cam_high.jpg", + "images/task_004_episode_15_420_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_15", + "recording_id": "task_004_episode_15", + "episode_id": null, + "camera": "cam_high", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_004_episode_15_737_cam_high.jpg", + "images/task_004_episode_15_790_cam_high.jpg", + "images/task_004_episode_15_685_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_15", + "recording_id": "task_004_episode_15", + "episode_id": null, + "camera": "cam_high", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Down\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Left" + } + ], + "images": [ + "images/task_004_episode_44_6_cam_high.jpg", + "images/task_004_episode_44_21_cam_high.jpg", + "images/task_004_episode_44_36_cam_high.jpg", + "images/task_004_episode_44_51_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_44", + "recording_id": "task_004_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Right\nC. Left\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Left" + } + ], + "images": [ + "images/task_004_episode_44_12_cam_high.jpg", + "images/task_004_episode_44_27_cam_high.jpg", + "images/task_004_episode_44_42_cam_high.jpg", + "images/task_004_episode_44_57_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_44", + "recording_id": "task_004_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Left\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Left" + } + ], + "images": [ + "images/task_004_episode_44_18_cam_high.jpg", + "images/task_004_episode_44_33_cam_high.jpg", + "images/task_004_episode_44_48_cam_high.jpg", + "images/task_004_episode_44_63_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_44", + "recording_id": "task_004_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Down\nC. Up\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Right" + } + ], + "images": [ + "images/task_004_episode_44_282_cam_high.jpg", + "images/task_004_episode_44_297_cam_high.jpg", + "images/task_004_episode_44_312_cam_high.jpg", + "images/task_004_episode_44_327_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_44", + "recording_id": "task_004_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Right" + } + ], + "images": [ + "images/task_004_episode_44_288_cam_high.jpg", + "images/task_004_episode_44_303_cam_high.jpg", + "images/task_004_episode_44_318_cam_high.jpg", + "images/task_004_episode_44_333_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_44", + "recording_id": "task_004_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Up\nC. Down\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Right" + } + ], + "images": [ + "images/task_004_episode_44_288_cam_high.jpg", + "images/task_004_episode_44_303_cam_high.jpg", + "images/task_004_episode_44_318_cam_high.jpg", + "images/task_004_episode_44_333_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_44", + "recording_id": "task_004_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Right\nC. Left\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Right" + } + ], + "images": [ + "images/task_004_episode_44_294_cam_high.jpg", + "images/task_004_episode_44_309_cam_high.jpg", + "images/task_004_episode_44_324_cam_high.jpg", + "images/task_004_episode_44_339_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_44", + "recording_id": "task_004_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Down\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Right" + } + ], + "images": [ + "images/task_004_episode_44_294_cam_high.jpg", + "images/task_004_episode_44_309_cam_high.jpg", + "images/task_004_episode_44_324_cam_high.jpg", + "images/task_004_episode_44_339_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_44", + "recording_id": "task_004_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Down\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Right" + } + ], + "images": [ + "images/task_004_episode_44_300_cam_high.jpg", + "images/task_004_episode_44_315_cam_high.jpg", + "images/task_004_episode_44_330_cam_high.jpg", + "images/task_004_episode_44_345_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_44", + "recording_id": "task_004_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Down\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Up" + } + ], + "images": [ + "images/task_004_episode_44_336_cam_high.jpg", + "images/task_004_episode_44_351_cam_high.jpg", + "images/task_004_episode_44_366_cam_high.jpg", + "images/task_004_episode_44_381_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_44", + "recording_id": "task_004_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Left\nC. Right\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Up" + } + ], + "images": [ + "images/task_004_episode_44_342_cam_high.jpg", + "images/task_004_episode_44_357_cam_high.jpg", + "images/task_004_episode_44_372_cam_high.jpg", + "images/task_004_episode_44_387_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_44", + "recording_id": "task_004_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Left\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Up" + } + ], + "images": [ + "images/task_004_episode_44_348_cam_high.jpg", + "images/task_004_episode_44_363_cam_high.jpg", + "images/task_004_episode_44_378_cam_high.jpg", + "images/task_004_episode_44_393_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_44", + "recording_id": "task_004_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Down\nC. Up\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Up" + } + ], + "images": [ + "images/task_004_episode_44_360_cam_high.jpg", + "images/task_004_episode_44_375_cam_high.jpg", + "images/task_004_episode_44_390_cam_high.jpg", + "images/task_004_episode_44_405_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_44", + "recording_id": "task_004_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Up\nC. Down\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Down" + } + ], + "images": [ + "images/task_004_episode_44_378_cam_high.jpg", + "images/task_004_episode_44_393_cam_high.jpg", + "images/task_004_episode_44_408_cam_high.jpg", + "images/task_004_episode_44_423_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_44", + "recording_id": "task_004_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Up\nC. Left\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Down" + } + ], + "images": [ + "images/task_004_episode_44_384_cam_high.jpg", + "images/task_004_episode_44_399_cam_high.jpg", + "images/task_004_episode_44_414_cam_high.jpg", + "images/task_004_episode_44_429_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_44", + "recording_id": "task_004_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Down\nC. Right\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Down" + } + ], + "images": [ + "images/task_004_episode_44_390_cam_high.jpg", + "images/task_004_episode_44_405_cam_high.jpg", + "images/task_004_episode_44_420_cam_high.jpg", + "images/task_004_episode_44_435_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_44", + "recording_id": "task_004_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Down\nC. Right\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Up" + } + ], + "images": [ + "images/task_004_episode_44_522_cam_high.jpg", + "images/task_004_episode_44_537_cam_high.jpg", + "images/task_004_episode_44_552_cam_high.jpg", + "images/task_004_episode_44_567_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_44", + "recording_id": "task_004_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Down\nC. Right\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Up" + } + ], + "images": [ + "images/task_004_episode_44_528_cam_high.jpg", + "images/task_004_episode_44_543_cam_high.jpg", + "images/task_004_episode_44_558_cam_high.jpg", + "images/task_004_episode_44_573_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_44", + "recording_id": "task_004_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Right\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Down" + } + ], + "images": [ + "images/task_004_episode_44_720_cam_high.jpg", + "images/task_004_episode_44_735_cam_high.jpg", + "images/task_004_episode_44_750_cam_high.jpg", + "images/task_004_episode_44_765_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_44", + "recording_id": "task_004_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Left\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Down" + } + ], + "images": [ + "images/task_004_episode_44_726_cam_high.jpg", + "images/task_004_episode_44_741_cam_high.jpg", + "images/task_004_episode_44_756_cam_high.jpg", + "images/task_004_episode_44_771_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_44", + "recording_id": "task_004_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Left\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Down" + } + ], + "images": [ + "images/task_004_episode_44_732_cam_high.jpg", + "images/task_004_episode_44_747_cam_high.jpg", + "images/task_004_episode_44_762_cam_high.jpg", + "images/task_004_episode_44_777_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_44", + "recording_id": "task_004_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Only the right arm is actively moving" + } + ], + "images": [ + "images/task_004_episode_44_26_cam_high.jpg", + "images/task_004_episode_44_41_cam_high.jpg", + "images/task_004_episode_44_56_cam_high.jpg", + "images/task_004_episode_44_71_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_44", + "recording_id": "task_004_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_004_episode_44_648_cam_high.jpg", + "images/task_004_episode_44_663_cam_high.jpg", + "images/task_004_episode_44_678_cam_high.jpg", + "images/task_004_episode_44_693_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_44", + "recording_id": "task_004_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_004_episode_44_674_cam_high.jpg", + "images/task_004_episode_44_689_cam_high.jpg", + "images/task_004_episode_44_704_cam_high.jpg", + "images/task_004_episode_44_719_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_44", + "recording_id": "task_004_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_004_episode_44_93_cam_high.jpg", + "images/task_004_episode_44_108_cam_high.jpg", + "images/task_004_episode_44_123_cam_high.jpg", + "images/task_004_episode_44_138_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_44", + "recording_id": "task_004_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Only the right arm is actively moving" + } + ], + "images": [ + "images/task_004_episode_44_13_cam_high.jpg", + "images/task_004_episode_44_28_cam_high.jpg", + "images/task_004_episode_44_43_cam_high.jpg", + "images/task_004_episode_44_58_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_44", + "recording_id": "task_004_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Both arms are actively moving" + } + ], + "images": [ + "images/task_004_episode_44_873_cam_high.jpg", + "images/task_004_episode_44_888_cam_high.jpg", + "images/task_004_episode_44_903_cam_high.jpg", + "images/task_004_episode_44_918_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_44", + "recording_id": "task_004_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Both arms are actively moving" + } + ], + "images": [ + "images/task_004_episode_44_754_cam_high.jpg", + "images/task_004_episode_44_769_cam_high.jpg", + "images/task_004_episode_44_784_cam_high.jpg", + "images/task_004_episode_44_799_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_44", + "recording_id": "task_004_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Both arms are actively moving" + } + ], + "images": [ + "images/task_004_episode_44_899_cam_high.jpg", + "images/task_004_episode_44_914_cam_high.jpg", + "images/task_004_episode_44_929_cam_high.jpg", + "images/task_004_episode_44_944_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_44", + "recording_id": "task_004_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Only the left arm is actively moving" + } + ], + "images": [ + "images/task_004_episode_44_317_cam_high.jpg", + "images/task_004_episode_44_332_cam_high.jpg", + "images/task_004_episode_44_347_cam_high.jpg", + "images/task_004_episode_44_362_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_44", + "recording_id": "task_004_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Only the right arm is actively moving" + } + ], + "images": [ + "images/task_004_episode_44_79_cam_high.jpg", + "images/task_004_episode_44_94_cam_high.jpg", + "images/task_004_episode_44_109_cam_high.jpg", + "images/task_004_episode_44_124_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_44", + "recording_id": "task_004_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Only the left arm is actively moving" + } + ], + "images": [ + "images/task_004_episode_44_913_cam_high.jpg", + "images/task_004_episode_44_928_cam_high.jpg", + "images/task_004_episode_44_943_cam_high.jpg", + "images/task_004_episode_44_958_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_44", + "recording_id": "task_004_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Only the left arm is actively moving" + } + ], + "images": [ + "images/task_004_episode_44_331_cam_high.jpg", + "images/task_004_episode_44_346_cam_high.jpg", + "images/task_004_episode_44_361_cam_high.jpg", + "images/task_004_episode_44_376_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_44", + "recording_id": "task_004_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_004_episode_44_382_cam_high.jpg", + "images/task_004_episode_44_385_cam_high.jpg", + "images/task_004_episode_44_388_cam_high.jpg", + "images/task_004_episode_44_391_cam_high.jpg", + "images/task_004_episode_44_394_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_44", + "recording_id": "task_004_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_004_episode_44_629_cam_high.jpg", + "images/task_004_episode_44_632_cam_high.jpg", + "images/task_004_episode_44_635_cam_high.jpg", + "images/task_004_episode_44_638_cam_high.jpg", + "images/task_004_episode_44_641_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_44", + "recording_id": "task_004_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_004_episode_44_887_cam_high.jpg", + "images/task_004_episode_44_890_cam_high.jpg", + "images/task_004_episode_44_893_cam_high.jpg", + "images/task_004_episode_44_896_cam_high.jpg", + "images/task_004_episode_44_899_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_44", + "recording_id": "task_004_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_004_episode_44_676_cam_high.jpg", + "images/task_004_episode_44_679_cam_high.jpg", + "images/task_004_episode_44_682_cam_high.jpg", + "images/task_004_episode_44_685_cam_high.jpg", + "images/task_004_episode_44_688_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_44", + "recording_id": "task_004_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_004_episode_44_288_cam_high.jpg", + "images/task_004_episode_44_291_cam_high.jpg", + "images/task_004_episode_44_294_cam_high.jpg", + "images/task_004_episode_44_297_cam_high.jpg", + "images/task_004_episode_44_300_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_44", + "recording_id": "task_004_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_004_episode_44_570_cam_high.jpg", + "images/task_004_episode_44_573_cam_high.jpg", + "images/task_004_episode_44_576_cam_high.jpg", + "images/task_004_episode_44_579_cam_high.jpg", + "images/task_004_episode_44_582_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_44", + "recording_id": "task_004_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_004_episode_44_429_cam_high.jpg", + "images/task_004_episode_44_432_cam_high.jpg", + "images/task_004_episode_44_435_cam_high.jpg", + "images/task_004_episode_44_438_cam_high.jpg", + "images/task_004_episode_44_441_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_44", + "recording_id": "task_004_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_004_episode_44_875_cam_high.jpg", + "images/task_004_episode_44_878_cam_high.jpg", + "images/task_004_episode_44_881_cam_high.jpg", + "images/task_004_episode_44_884_cam_high.jpg", + "images/task_004_episode_44_887_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_44", + "recording_id": "task_004_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Frame X happens before Frame Y" + } + ], + "images": [ + "images/task_004_episode_44_180_cam_high.jpg", + "images/task_004_episode_44_719_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_44", + "recording_id": "task_004_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Frame Y happens before Frame X" + } + ], + "images": [ + "images/task_004_episode_44_619_cam_high.jpg", + "images/task_004_episode_44_250_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_44", + "recording_id": "task_004_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Frame Y happens before Frame X" + } + ], + "images": [ + "images/task_004_episode_44_819_cam_high.jpg", + "images/task_004_episode_44_350_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_44", + "recording_id": "task_004_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_004_episode_44_262_cam_high.jpg", + "images/task_004_episode_44_156_cam_high.jpg", + "images/task_004_episode_44_209_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_44", + "recording_id": "task_004_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_004_episode_44_685_cam_high.jpg", + "images/task_004_episode_44_737_cam_high.jpg", + "images/task_004_episode_44_790_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_44", + "recording_id": "task_004_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_004_episode_44_843_cam_high.jpg", + "images/task_004_episode_44_790_cam_high.jpg", + "images/task_004_episode_44_737_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_44", + "recording_id": "task_004_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_004_episode_44_314_cam_high.jpg", + "images/task_004_episode_44_262_cam_high.jpg", + "images/task_004_episode_44_367_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_44", + "recording_id": "task_004_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_004_episode_44_632_cam_high.jpg", + "images/task_004_episode_44_685_cam_high.jpg", + "images/task_004_episode_44_579_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_44", + "recording_id": "task_004_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_004_episode_44_949_cam_high.jpg", + "images/task_004_episode_44_896_cam_high.jpg", + "images/task_004_episode_44_843_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_44", + "recording_id": "task_004_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_004_episode_44_843_cam_high.jpg", + "images/task_004_episode_44_896_cam_high.jpg", + "images/task_004_episode_44_790_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_44", + "recording_id": "task_004_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_004_episode_44_420_cam_high.jpg", + "images/task_004_episode_44_526_cam_high.jpg", + "images/task_004_episode_44_473_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_44", + "recording_id": "task_004_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_004_episode_44_420_cam_high.jpg", + "images/task_004_episode_44_367_cam_high.jpg", + "images/task_004_episode_44_473_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_44", + "recording_id": "task_004_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_004_episode_44_579_cam_high.jpg", + "images/task_004_episode_44_632_cam_high.jpg", + "images/task_004_episode_44_526_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_004", + "split": "eval", + "split_group_id": "task_004_episode_44", + "recording_id": "task_004_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Right\nC. Left\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Right" + } + ], + "images": [ + "images/task_016_episode_5_0_cam_high.jpg", + "images/task_016_episode_5_15_cam_high.jpg", + "images/task_016_episode_5_30_cam_high.jpg", + "images/task_016_episode_5_45_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_016", + "split": "eval", + "split_group_id": "task_016_episode_5", + "recording_id": "task_016_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Left\nC. Right\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Right" + } + ], + "images": [ + "images/task_016_episode_5_0_cam_high.jpg", + "images/task_016_episode_5_15_cam_high.jpg", + "images/task_016_episode_5_30_cam_high.jpg", + "images/task_016_episode_5_45_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_016", + "split": "eval", + "split_group_id": "task_016_episode_5", + "recording_id": "task_016_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Down\nC. Left\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Right" + } + ], + "images": [ + "images/task_016_episode_5_6_cam_high.jpg", + "images/task_016_episode_5_21_cam_high.jpg", + "images/task_016_episode_5_36_cam_high.jpg", + "images/task_016_episode_5_51_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_016", + "split": "eval", + "split_group_id": "task_016_episode_5", + "recording_id": "task_016_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Down\nC. Up\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Right" + } + ], + "images": [ + "images/task_016_episode_5_6_cam_high.jpg", + "images/task_016_episode_5_21_cam_high.jpg", + "images/task_016_episode_5_36_cam_high.jpg", + "images/task_016_episode_5_51_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_016", + "split": "eval", + "split_group_id": "task_016_episode_5", + "recording_id": "task_016_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Right\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Right" + } + ], + "images": [ + "images/task_016_episode_5_12_cam_high.jpg", + "images/task_016_episode_5_27_cam_high.jpg", + "images/task_016_episode_5_42_cam_high.jpg", + "images/task_016_episode_5_57_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_016", + "split": "eval", + "split_group_id": "task_016_episode_5", + "recording_id": "task_016_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Right\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Down" + } + ], + "images": [ + "images/task_016_episode_5_12_cam_high.jpg", + "images/task_016_episode_5_27_cam_high.jpg", + "images/task_016_episode_5_42_cam_high.jpg", + "images/task_016_episode_5_57_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_016", + "split": "eval", + "split_group_id": "task_016_episode_5", + "recording_id": "task_016_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Right\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Left" + } + ], + "images": [ + "images/task_016_episode_5_18_cam_high.jpg", + "images/task_016_episode_5_33_cam_high.jpg", + "images/task_016_episode_5_48_cam_high.jpg", + "images/task_016_episode_5_63_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_016", + "split": "eval", + "split_group_id": "task_016_episode_5", + "recording_id": "task_016_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Left\nC. Down\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Left" + } + ], + "images": [ + "images/task_016_episode_5_24_cam_high.jpg", + "images/task_016_episode_5_39_cam_high.jpg", + "images/task_016_episode_5_54_cam_high.jpg", + "images/task_016_episode_5_69_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_016", + "split": "eval", + "split_group_id": "task_016_episode_5", + "recording_id": "task_016_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Right\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Left" + } + ], + "images": [ + "images/task_016_episode_5_30_cam_high.jpg", + "images/task_016_episode_5_45_cam_high.jpg", + "images/task_016_episode_5_60_cam_high.jpg", + "images/task_016_episode_5_75_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_016", + "split": "eval", + "split_group_id": "task_016_episode_5", + "recording_id": "task_016_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Left\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Down" + } + ], + "images": [ + "images/task_016_episode_5_78_cam_high.jpg", + "images/task_016_episode_5_93_cam_high.jpg", + "images/task_016_episode_5_108_cam_high.jpg", + "images/task_016_episode_5_123_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_016", + "split": "eval", + "split_group_id": "task_016_episode_5", + "recording_id": "task_016_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Right\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Down" + } + ], + "images": [ + "images/task_016_episode_5_84_cam_high.jpg", + "images/task_016_episode_5_99_cam_high.jpg", + "images/task_016_episode_5_114_cam_high.jpg", + "images/task_016_episode_5_129_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_016", + "split": "eval", + "split_group_id": "task_016_episode_5", + "recording_id": "task_016_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Down\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Down" + } + ], + "images": [ + "images/task_016_episode_5_90_cam_high.jpg", + "images/task_016_episode_5_105_cam_high.jpg", + "images/task_016_episode_5_120_cam_high.jpg", + "images/task_016_episode_5_135_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_016", + "split": "eval", + "split_group_id": "task_016_episode_5", + "recording_id": "task_016_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Right" + } + ], + "images": [ + "images/task_016_episode_5_330_cam_high.jpg", + "images/task_016_episode_5_345_cam_high.jpg", + "images/task_016_episode_5_360_cam_high.jpg", + "images/task_016_episode_5_375_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_016", + "split": "eval", + "split_group_id": "task_016_episode_5", + "recording_id": "task_016_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Up\nC. Down\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Up" + } + ], + "images": [ + "images/task_016_episode_5_426_cam_high.jpg", + "images/task_016_episode_5_441_cam_high.jpg", + "images/task_016_episode_5_456_cam_high.jpg", + "images/task_016_episode_5_471_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_016", + "split": "eval", + "split_group_id": "task_016_episode_5", + "recording_id": "task_016_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Left\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Up" + } + ], + "images": [ + "images/task_016_episode_5_432_cam_high.jpg", + "images/task_016_episode_5_447_cam_high.jpg", + "images/task_016_episode_5_462_cam_high.jpg", + "images/task_016_episode_5_477_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_016", + "split": "eval", + "split_group_id": "task_016_episode_5", + "recording_id": "task_016_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Down\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Up" + } + ], + "images": [ + "images/task_016_episode_5_438_cam_high.jpg", + "images/task_016_episode_5_453_cam_high.jpg", + "images/task_016_episode_5_468_cam_high.jpg", + "images/task_016_episode_5_483_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_016", + "split": "eval", + "split_group_id": "task_016_episode_5", + "recording_id": "task_016_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Right\nC. Down\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Down" + } + ], + "images": [ + "images/task_016_episode_5_510_cam_high.jpg", + "images/task_016_episode_5_525_cam_high.jpg", + "images/task_016_episode_5_540_cam_high.jpg", + "images/task_016_episode_5_555_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_016", + "split": "eval", + "split_group_id": "task_016_episode_5", + "recording_id": "task_016_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Down\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Down" + } + ], + "images": [ + "images/task_016_episode_5_516_cam_high.jpg", + "images/task_016_episode_5_531_cam_high.jpg", + "images/task_016_episode_5_546_cam_high.jpg", + "images/task_016_episode_5_561_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_016", + "split": "eval", + "split_group_id": "task_016_episode_5", + "recording_id": "task_016_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_016_episode_5_688_cam_high.jpg", + "images/task_016_episode_5_703_cam_high.jpg", + "images/task_016_episode_5_718_cam_high.jpg", + "images/task_016_episode_5_733_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_016", + "split": "eval", + "split_group_id": "task_016_episode_5", + "recording_id": "task_016_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Only the left arm is actively moving" + } + ], + "images": [ + "images/task_016_episode_5_79_cam_high.jpg", + "images/task_016_episode_5_94_cam_high.jpg", + "images/task_016_episode_5_109_cam_high.jpg", + "images/task_016_episode_5_124_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_016", + "split": "eval", + "split_group_id": "task_016_episode_5", + "recording_id": "task_016_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Only the left arm is actively moving" + } + ], + "images": [ + "images/task_016_episode_5_159_cam_high.jpg", + "images/task_016_episode_5_174_cam_high.jpg", + "images/task_016_episode_5_189_cam_high.jpg", + "images/task_016_episode_5_204_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_016", + "split": "eval", + "split_group_id": "task_016_episode_5", + "recording_id": "task_016_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_016_episode_5_860_cam_high.jpg", + "images/task_016_episode_5_875_cam_high.jpg", + "images/task_016_episode_5_890_cam_high.jpg", + "images/task_016_episode_5_905_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_016", + "split": "eval", + "split_group_id": "task_016_episode_5", + "recording_id": "task_016_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_016_episode_5_899_cam_high.jpg", + "images/task_016_episode_5_914_cam_high.jpg", + "images/task_016_episode_5_929_cam_high.jpg", + "images/task_016_episode_5_944_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_016", + "split": "eval", + "split_group_id": "task_016_episode_5", + "recording_id": "task_016_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Only the left arm is actively moving" + } + ], + "images": [ + "images/task_016_episode_5_119_cam_high.jpg", + "images/task_016_episode_5_134_cam_high.jpg", + "images/task_016_episode_5_149_cam_high.jpg", + "images/task_016_episode_5_164_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_016", + "split": "eval", + "split_group_id": "task_016_episode_5", + "recording_id": "task_016_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Only the right arm is actively moving" + } + ], + "images": [ + "images/task_016_episode_5_357_cam_high.jpg", + "images/task_016_episode_5_372_cam_high.jpg", + "images/task_016_episode_5_387_cam_high.jpg", + "images/task_016_episode_5_402_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_016", + "split": "eval", + "split_group_id": "task_016_episode_5", + "recording_id": "task_016_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Only the right arm is actively moving" + } + ], + "images": [ + "images/task_016_episode_5_476_cam_high.jpg", + "images/task_016_episode_5_491_cam_high.jpg", + "images/task_016_episode_5_506_cam_high.jpg", + "images/task_016_episode_5_521_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_016", + "split": "eval", + "split_group_id": "task_016_episode_5", + "recording_id": "task_016_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Only the right arm is actively moving" + } + ], + "images": [ + "images/task_016_episode_5_436_cam_high.jpg", + "images/task_016_episode_5_451_cam_high.jpg", + "images/task_016_episode_5_466_cam_high.jpg", + "images/task_016_episode_5_481_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_016", + "split": "eval", + "split_group_id": "task_016_episode_5", + "recording_id": "task_016_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_016_episode_5_147_cam_high.jpg", + "images/task_016_episode_5_150_cam_high.jpg", + "images/task_016_episode_5_153_cam_high.jpg", + "images/task_016_episode_5_156_cam_high.jpg", + "images/task_016_episode_5_159_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_016", + "split": "eval", + "split_group_id": "task_016_episode_5", + "recording_id": "task_016_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_016_episode_5_347_cam_high.jpg", + "images/task_016_episode_5_350_cam_high.jpg", + "images/task_016_episode_5_353_cam_high.jpg", + "images/task_016_episode_5_356_cam_high.jpg", + "images/task_016_episode_5_359_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_016", + "split": "eval", + "split_group_id": "task_016_episode_5", + "recording_id": "task_016_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_016_episode_5_723_cam_high.jpg", + "images/task_016_episode_5_726_cam_high.jpg", + "images/task_016_episode_5_729_cam_high.jpg", + "images/task_016_episode_5_732_cam_high.jpg", + "images/task_016_episode_5_735_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_016", + "split": "eval", + "split_group_id": "task_016_episode_5", + "recording_id": "task_016_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_016_episode_5_6_cam_high.jpg", + "images/task_016_episode_5_9_cam_high.jpg", + "images/task_016_episode_5_12_cam_high.jpg", + "images/task_016_episode_5_15_cam_high.jpg", + "images/task_016_episode_5_18_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_016", + "split": "eval", + "split_group_id": "task_016_episode_5", + "recording_id": "task_016_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_016_episode_5_335_cam_high.jpg", + "images/task_016_episode_5_338_cam_high.jpg", + "images/task_016_episode_5_341_cam_high.jpg", + "images/task_016_episode_5_344_cam_high.jpg", + "images/task_016_episode_5_347_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_016", + "split": "eval", + "split_group_id": "task_016_episode_5", + "recording_id": "task_016_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_016_episode_5_523_cam_high.jpg", + "images/task_016_episode_5_526_cam_high.jpg", + "images/task_016_episode_5_529_cam_high.jpg", + "images/task_016_episode_5_532_cam_high.jpg", + "images/task_016_episode_5_535_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_016", + "split": "eval", + "split_group_id": "task_016_episode_5", + "recording_id": "task_016_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_016_episode_5_958_cam_high.jpg", + "images/task_016_episode_5_961_cam_high.jpg", + "images/task_016_episode_5_964_cam_high.jpg", + "images/task_016_episode_5_967_cam_high.jpg", + "images/task_016_episode_5_970_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_016", + "split": "eval", + "split_group_id": "task_016_episode_5", + "recording_id": "task_016_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_016_episode_5_593_cam_high.jpg", + "images/task_016_episode_5_596_cam_high.jpg", + "images/task_016_episode_5_599_cam_high.jpg", + "images/task_016_episode_5_602_cam_high.jpg", + "images/task_016_episode_5_605_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_016", + "split": "eval", + "split_group_id": "task_016_episode_5", + "recording_id": "task_016_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Frame Y happens before Frame X" + } + ], + "images": [ + "images/task_016_episode_5_719_cam_high.jpg", + "images/task_016_episode_5_180_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_016", + "split": "eval", + "split_group_id": "task_016_episode_5", + "recording_id": "task_016_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Frame X happens before Frame Y" + } + ], + "images": [ + "images/task_016_episode_5_250_cam_high.jpg", + "images/task_016_episode_5_619_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_016", + "split": "eval", + "split_group_id": "task_016_episode_5", + "recording_id": "task_016_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Frame Y happens before Frame X" + } + ], + "images": [ + "images/task_016_episode_5_819_cam_high.jpg", + "images/task_016_episode_5_350_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_016", + "split": "eval", + "split_group_id": "task_016_episode_5", + "recording_id": "task_016_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_016_episode_5_632_cam_high.jpg", + "images/task_016_episode_5_579_cam_high.jpg", + "images/task_016_episode_5_526_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_016", + "split": "eval", + "split_group_id": "task_016_episode_5", + "recording_id": "task_016_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_016_episode_5_156_cam_high.jpg", + "images/task_016_episode_5_262_cam_high.jpg", + "images/task_016_episode_5_209_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_016", + "split": "eval", + "split_group_id": "task_016_episode_5", + "recording_id": "task_016_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_016_episode_5_50_cam_high.jpg", + "images/task_016_episode_5_156_cam_high.jpg", + "images/task_016_episode_5_103_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_016", + "split": "eval", + "split_group_id": "task_016_episode_5", + "recording_id": "task_016_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_016_episode_5_473_cam_high.jpg", + "images/task_016_episode_5_579_cam_high.jpg", + "images/task_016_episode_5_526_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_016", + "split": "eval", + "split_group_id": "task_016_episode_5", + "recording_id": "task_016_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_016_episode_5_685_cam_high.jpg", + "images/task_016_episode_5_632_cam_high.jpg", + "images/task_016_episode_5_579_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_016", + "split": "eval", + "split_group_id": "task_016_episode_5", + "recording_id": "task_016_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_016_episode_5_896_cam_high.jpg", + "images/task_016_episode_5_843_cam_high.jpg", + "images/task_016_episode_5_949_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_016", + "split": "eval", + "split_group_id": "task_016_episode_5", + "recording_id": "task_016_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_016_episode_5_209_cam_high.jpg", + "images/task_016_episode_5_262_cam_high.jpg", + "images/task_016_episode_5_314_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_016", + "split": "eval", + "split_group_id": "task_016_episode_5", + "recording_id": "task_016_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Left\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Left" + } + ], + "images": [ + "images/task_031_episode_21_0_cam_high.jpg", + "images/task_031_episode_21_15_cam_high.jpg", + "images/task_031_episode_21_30_cam_high.jpg", + "images/task_031_episode_21_45_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_031", + "split": "eval", + "split_group_id": "task_031_episode_21", + "recording_id": "task_031_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Down\nC. Left\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Left" + } + ], + "images": [ + "images/task_031_episode_21_6_cam_high.jpg", + "images/task_031_episode_21_21_cam_high.jpg", + "images/task_031_episode_21_36_cam_high.jpg", + "images/task_031_episode_21_51_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_031", + "split": "eval", + "split_group_id": "task_031_episode_21", + "recording_id": "task_031_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Left\nC. Down\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Left" + } + ], + "images": [ + "images/task_031_episode_21_12_cam_high.jpg", + "images/task_031_episode_21_27_cam_high.jpg", + "images/task_031_episode_21_42_cam_high.jpg", + "images/task_031_episode_21_57_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_031", + "split": "eval", + "split_group_id": "task_031_episode_21", + "recording_id": "task_031_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Down\nC. Right\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Right" + } + ], + "images": [ + "images/task_031_episode_21_162_cam_high.jpg", + "images/task_031_episode_21_177_cam_high.jpg", + "images/task_031_episode_21_192_cam_high.jpg", + "images/task_031_episode_21_207_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_031", + "split": "eval", + "split_group_id": "task_031_episode_21", + "recording_id": "task_031_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Down\nC. Left\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Right" + } + ], + "images": [ + "images/task_031_episode_21_168_cam_high.jpg", + "images/task_031_episode_21_183_cam_high.jpg", + "images/task_031_episode_21_198_cam_high.jpg", + "images/task_031_episode_21_213_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_031", + "split": "eval", + "split_group_id": "task_031_episode_21", + "recording_id": "task_031_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Left\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Right" + } + ], + "images": [ + "images/task_031_episode_21_174_cam_high.jpg", + "images/task_031_episode_21_189_cam_high.jpg", + "images/task_031_episode_21_204_cam_high.jpg", + "images/task_031_episode_21_219_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_031", + "split": "eval", + "split_group_id": "task_031_episode_21", + "recording_id": "task_031_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Down\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Up" + } + ], + "images": [ + "images/task_031_episode_21_186_cam_high.jpg", + "images/task_031_episode_21_201_cam_high.jpg", + "images/task_031_episode_21_216_cam_high.jpg", + "images/task_031_episode_21_231_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_031", + "split": "eval", + "split_group_id": "task_031_episode_21", + "recording_id": "task_031_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Right\nC. Left\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Up" + } + ], + "images": [ + "images/task_031_episode_21_192_cam_high.jpg", + "images/task_031_episode_21_207_cam_high.jpg", + "images/task_031_episode_21_222_cam_high.jpg", + "images/task_031_episode_21_237_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_031", + "split": "eval", + "split_group_id": "task_031_episode_21", + "recording_id": "task_031_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Left\nC. Up\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Up" + } + ], + "images": [ + "images/task_031_episode_21_198_cam_high.jpg", + "images/task_031_episode_21_213_cam_high.jpg", + "images/task_031_episode_21_228_cam_high.jpg", + "images/task_031_episode_21_243_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_031", + "split": "eval", + "split_group_id": "task_031_episode_21", + "recording_id": "task_031_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Left\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Up" + } + ], + "images": [ + "images/task_031_episode_21_204_cam_high.jpg", + "images/task_031_episode_21_219_cam_high.jpg", + "images/task_031_episode_21_234_cam_high.jpg", + "images/task_031_episode_21_249_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_031", + "split": "eval", + "split_group_id": "task_031_episode_21", + "recording_id": "task_031_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Down\nC. Right\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Up" + } + ], + "images": [ + "images/task_031_episode_21_210_cam_high.jpg", + "images/task_031_episode_21_225_cam_high.jpg", + "images/task_031_episode_21_240_cam_high.jpg", + "images/task_031_episode_21_255_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_031", + "split": "eval", + "split_group_id": "task_031_episode_21", + "recording_id": "task_031_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Right\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Up" + } + ], + "images": [ + "images/task_031_episode_21_216_cam_high.jpg", + "images/task_031_episode_21_231_cam_high.jpg", + "images/task_031_episode_21_246_cam_high.jpg", + "images/task_031_episode_21_261_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_031", + "split": "eval", + "split_group_id": "task_031_episode_21", + "recording_id": "task_031_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Right" + } + ], + "images": [ + "images/task_031_episode_21_366_cam_high.jpg", + "images/task_031_episode_21_381_cam_high.jpg", + "images/task_031_episode_21_396_cam_high.jpg", + "images/task_031_episode_21_411_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_031", + "split": "eval", + "split_group_id": "task_031_episode_21", + "recording_id": "task_031_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Right" + } + ], + "images": [ + "images/task_031_episode_21_372_cam_high.jpg", + "images/task_031_episode_21_387_cam_high.jpg", + "images/task_031_episode_21_402_cam_high.jpg", + "images/task_031_episode_21_417_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_031", + "split": "eval", + "split_group_id": "task_031_episode_21", + "recording_id": "task_031_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Down\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Right" + } + ], + "images": [ + "images/task_031_episode_21_378_cam_high.jpg", + "images/task_031_episode_21_393_cam_high.jpg", + "images/task_031_episode_21_408_cam_high.jpg", + "images/task_031_episode_21_423_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_031", + "split": "eval", + "split_group_id": "task_031_episode_21", + "recording_id": "task_031_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Down\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Down" + } + ], + "images": [ + "images/task_031_episode_21_444_cam_high.jpg", + "images/task_031_episode_21_459_cam_high.jpg", + "images/task_031_episode_21_474_cam_high.jpg", + "images/task_031_episode_21_489_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_031", + "split": "eval", + "split_group_id": "task_031_episode_21", + "recording_id": "task_031_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Down\nC. Up\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Down" + } + ], + "images": [ + "images/task_031_episode_21_450_cam_high.jpg", + "images/task_031_episode_21_465_cam_high.jpg", + "images/task_031_episode_21_480_cam_high.jpg", + "images/task_031_episode_21_495_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_031", + "split": "eval", + "split_group_id": "task_031_episode_21", + "recording_id": "task_031_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Left\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Down" + } + ], + "images": [ + "images/task_031_episode_21_456_cam_high.jpg", + "images/task_031_episode_21_471_cam_high.jpg", + "images/task_031_episode_21_486_cam_high.jpg", + "images/task_031_episode_21_501_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_031", + "split": "eval", + "split_group_id": "task_031_episode_21", + "recording_id": "task_031_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Only the right arm is actively moving" + } + ], + "images": [ + "images/task_031_episode_21_714_cam_high.jpg", + "images/task_031_episode_21_729_cam_high.jpg", + "images/task_031_episode_21_744_cam_high.jpg", + "images/task_031_episode_21_759_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_031", + "split": "eval", + "split_group_id": "task_031_episode_21", + "recording_id": "task_031_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Only the right arm is actively moving" + } + ], + "images": [ + "images/task_031_episode_21_79_cam_high.jpg", + "images/task_031_episode_21_94_cam_high.jpg", + "images/task_031_episode_21_109_cam_high.jpg", + "images/task_031_episode_21_124_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_031", + "split": "eval", + "split_group_id": "task_031_episode_21", + "recording_id": "task_031_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_031_episode_21_423_cam_high.jpg", + "images/task_031_episode_21_438_cam_high.jpg", + "images/task_031_episode_21_453_cam_high.jpg", + "images/task_031_episode_21_468_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_031", + "split": "eval", + "split_group_id": "task_031_episode_21", + "recording_id": "task_031_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Only the left arm is actively moving" + } + ], + "images": [ + "images/task_031_episode_21_317_cam_high.jpg", + "images/task_031_episode_21_332_cam_high.jpg", + "images/task_031_episode_21_347_cam_high.jpg", + "images/task_031_episode_21_362_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_031", + "split": "eval", + "split_group_id": "task_031_episode_21", + "recording_id": "task_031_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Both arms are actively moving" + } + ], + "images": [ + "images/task_031_episode_21_225_cam_high.jpg", + "images/task_031_episode_21_240_cam_high.jpg", + "images/task_031_episode_21_255_cam_high.jpg", + "images/task_031_episode_21_270_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_031", + "split": "eval", + "split_group_id": "task_031_episode_21", + "recording_id": "task_031_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Only the left arm is actively moving" + } + ], + "images": [ + "images/task_031_episode_21_555_cam_high.jpg", + "images/task_031_episode_21_570_cam_high.jpg", + "images/task_031_episode_21_585_cam_high.jpg", + "images/task_031_episode_21_600_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_031", + "split": "eval", + "split_group_id": "task_031_episode_21", + "recording_id": "task_031_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_031_episode_21_132_cam_high.jpg", + "images/task_031_episode_21_147_cam_high.jpg", + "images/task_031_episode_21_162_cam_high.jpg", + "images/task_031_episode_21_177_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_031", + "split": "eval", + "split_group_id": "task_031_episode_21", + "recording_id": "task_031_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Both arms are actively moving" + } + ], + "images": [ + "images/task_031_episode_21_238_cam_high.jpg", + "images/task_031_episode_21_253_cam_high.jpg", + "images/task_031_episode_21_268_cam_high.jpg", + "images/task_031_episode_21_283_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_031", + "split": "eval", + "split_group_id": "task_031_episode_21", + "recording_id": "task_031_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Both arms are actively moving" + } + ], + "images": [ + "images/task_031_episode_21_0_cam_high.jpg", + "images/task_031_episode_21_15_cam_high.jpg", + "images/task_031_episode_21_30_cam_high.jpg", + "images/task_031_episode_21_45_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_031", + "split": "eval", + "split_group_id": "task_031_episode_21", + "recording_id": "task_031_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_031_episode_21_344_cam_high.jpg", + "images/task_031_episode_21_359_cam_high.jpg", + "images/task_031_episode_21_374_cam_high.jpg", + "images/task_031_episode_21_389_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_031", + "split": "eval", + "split_group_id": "task_031_episode_21", + "recording_id": "task_031_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Only the left arm is actively moving" + } + ], + "images": [ + "images/task_031_episode_21_582_cam_high.jpg", + "images/task_031_episode_21_597_cam_high.jpg", + "images/task_031_episode_21_612_cam_high.jpg", + "images/task_031_episode_21_627_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_031", + "split": "eval", + "split_group_id": "task_031_episode_21", + "recording_id": "task_031_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Only the right arm is actively moving" + } + ], + "images": [ + "images/task_031_episode_21_66_cam_high.jpg", + "images/task_031_episode_21_81_cam_high.jpg", + "images/task_031_episode_21_96_cam_high.jpg", + "images/task_031_episode_21_111_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_031", + "split": "eval", + "split_group_id": "task_031_episode_21", + "recording_id": "task_031_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_031_episode_21_229_cam_high.jpg", + "images/task_031_episode_21_232_cam_high.jpg", + "images/task_031_episode_21_235_cam_high.jpg", + "images/task_031_episode_21_238_cam_high.jpg", + "images/task_031_episode_21_241_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_031", + "split": "eval", + "split_group_id": "task_031_episode_21", + "recording_id": "task_031_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_031_episode_21_758_cam_high.jpg", + "images/task_031_episode_21_761_cam_high.jpg", + "images/task_031_episode_21_764_cam_high.jpg", + "images/task_031_episode_21_767_cam_high.jpg", + "images/task_031_episode_21_770_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_031", + "split": "eval", + "split_group_id": "task_031_episode_21", + "recording_id": "task_031_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_031_episode_21_123_cam_high.jpg", + "images/task_031_episode_21_126_cam_high.jpg", + "images/task_031_episode_21_129_cam_high.jpg", + "images/task_031_episode_21_132_cam_high.jpg", + "images/task_031_episode_21_135_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_031", + "split": "eval", + "split_group_id": "task_031_episode_21", + "recording_id": "task_031_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_031_episode_21_241_cam_high.jpg", + "images/task_031_episode_21_244_cam_high.jpg", + "images/task_031_episode_21_247_cam_high.jpg", + "images/task_031_episode_21_250_cam_high.jpg", + "images/task_031_episode_21_253_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_031", + "split": "eval", + "split_group_id": "task_031_episode_21", + "recording_id": "task_031_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_031_episode_21_206_cam_high.jpg", + "images/task_031_episode_21_209_cam_high.jpg", + "images/task_031_episode_21_212_cam_high.jpg", + "images/task_031_episode_21_215_cam_high.jpg", + "images/task_031_episode_21_218_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_031", + "split": "eval", + "split_group_id": "task_031_episode_21", + "recording_id": "task_031_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_031_episode_21_934_cam_high.jpg", + "images/task_031_episode_21_937_cam_high.jpg", + "images/task_031_episode_21_940_cam_high.jpg", + "images/task_031_episode_21_943_cam_high.jpg", + "images/task_031_episode_21_946_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_031", + "split": "eval", + "split_group_id": "task_031_episode_21", + "recording_id": "task_031_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_031_episode_21_958_cam_high.jpg", + "images/task_031_episode_21_961_cam_high.jpg", + "images/task_031_episode_21_964_cam_high.jpg", + "images/task_031_episode_21_967_cam_high.jpg", + "images/task_031_episode_21_970_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_031", + "split": "eval", + "split_group_id": "task_031_episode_21", + "recording_id": "task_031_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_031_episode_21_100_cam_high.jpg", + "images/task_031_episode_21_103_cam_high.jpg", + "images/task_031_episode_21_106_cam_high.jpg", + "images/task_031_episode_21_109_cam_high.jpg", + "images/task_031_episode_21_112_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_031", + "split": "eval", + "split_group_id": "task_031_episode_21", + "recording_id": "task_031_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Frame X happens before Frame Y" + } + ], + "images": [ + "images/task_031_episode_21_180_cam_high.jpg", + "images/task_031_episode_21_719_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_031", + "split": "eval", + "split_group_id": "task_031_episode_21", + "recording_id": "task_031_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Frame Y happens before Frame X" + } + ], + "images": [ + "images/task_031_episode_21_619_cam_high.jpg", + "images/task_031_episode_21_250_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_031", + "split": "eval", + "split_group_id": "task_031_episode_21", + "recording_id": "task_031_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Frame Y happens before Frame X" + } + ], + "images": [ + "images/task_031_episode_21_819_cam_high.jpg", + "images/task_031_episode_21_350_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_031", + "split": "eval", + "split_group_id": "task_031_episode_21", + "recording_id": "task_031_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_031_episode_21_685_cam_high.jpg", + "images/task_031_episode_21_790_cam_high.jpg", + "images/task_031_episode_21_737_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_031", + "split": "eval", + "split_group_id": "task_031_episode_21", + "recording_id": "task_031_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_031_episode_21_420_cam_high.jpg", + "images/task_031_episode_21_473_cam_high.jpg", + "images/task_031_episode_21_367_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_031", + "split": "eval", + "split_group_id": "task_031_episode_21", + "recording_id": "task_031_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_031_episode_21_896_cam_high.jpg", + "images/task_031_episode_21_843_cam_high.jpg", + "images/task_031_episode_21_790_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_031", + "split": "eval", + "split_group_id": "task_031_episode_21", + "recording_id": "task_031_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_031_episode_21_262_cam_high.jpg", + "images/task_031_episode_21_314_cam_high.jpg", + "images/task_031_episode_21_209_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_031", + "split": "eval", + "split_group_id": "task_031_episode_21", + "recording_id": "task_031_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_031_episode_21_314_cam_high.jpg", + "images/task_031_episode_21_367_cam_high.jpg", + "images/task_031_episode_21_420_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_031", + "split": "eval", + "split_group_id": "task_031_episode_21", + "recording_id": "task_031_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_031_episode_21_843_cam_high.jpg", + "images/task_031_episode_21_737_cam_high.jpg", + "images/task_031_episode_21_790_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_031", + "split": "eval", + "split_group_id": "task_031_episode_21", + "recording_id": "task_031_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_031_episode_21_314_cam_high.jpg", + "images/task_031_episode_21_367_cam_high.jpg", + "images/task_031_episode_21_262_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_031", + "split": "eval", + "split_group_id": "task_031_episode_21", + "recording_id": "task_031_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_031_episode_21_473_cam_high.jpg", + "images/task_031_episode_21_526_cam_high.jpg", + "images/task_031_episode_21_420_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_031", + "split": "eval", + "split_group_id": "task_031_episode_21", + "recording_id": "task_031_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_031_episode_21_156_cam_high.jpg", + "images/task_031_episode_21_103_cam_high.jpg", + "images/task_031_episode_21_209_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_031", + "split": "eval", + "split_group_id": "task_031_episode_21", + "recording_id": "task_031_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_031_episode_21_632_cam_high.jpg", + "images/task_031_episode_21_685_cam_high.jpg", + "images/task_031_episode_21_737_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_031", + "split": "eval", + "split_group_id": "task_031_episode_21", + "recording_id": "task_031_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_031_episode_21_262_cam_high.jpg", + "images/task_031_episode_21_156_cam_high.jpg", + "images/task_031_episode_21_209_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_031", + "split": "eval", + "split_group_id": "task_031_episode_21", + "recording_id": "task_031_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_031_episode_21_843_cam_high.jpg", + "images/task_031_episode_21_896_cam_high.jpg", + "images/task_031_episode_21_949_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_031", + "split": "eval", + "split_group_id": "task_031_episode_21", + "recording_id": "task_031_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_031_episode_21_526_cam_high.jpg", + "images/task_031_episode_21_579_cam_high.jpg", + "images/task_031_episode_21_473_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_031", + "split": "eval", + "split_group_id": "task_031_episode_21", + "recording_id": "task_031_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Down\nC. Left\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Left" + } + ], + "images": [ + "images/task_044_episode_16_0_cam_high.jpg", + "images/task_044_episode_16_15_cam_high.jpg", + "images/task_044_episode_16_30_cam_high.jpg", + "images/task_044_episode_16_45_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_16", + "recording_id": "task_044_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Left\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Left" + } + ], + "images": [ + "images/task_044_episode_16_6_cam_high.jpg", + "images/task_044_episode_16_21_cam_high.jpg", + "images/task_044_episode_16_36_cam_high.jpg", + "images/task_044_episode_16_51_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_16", + "recording_id": "task_044_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Left\nC. Up\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Left" + } + ], + "images": [ + "images/task_044_episode_16_12_cam_high.jpg", + "images/task_044_episode_16_27_cam_high.jpg", + "images/task_044_episode_16_42_cam_high.jpg", + "images/task_044_episode_16_57_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_16", + "recording_id": "task_044_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Left\nC. Down\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Right" + } + ], + "images": [ + "images/task_044_episode_16_114_cam_high.jpg", + "images/task_044_episode_16_129_cam_high.jpg", + "images/task_044_episode_16_144_cam_high.jpg", + "images/task_044_episode_16_159_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_16", + "recording_id": "task_044_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Left\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Right" + } + ], + "images": [ + "images/task_044_episode_16_120_cam_high.jpg", + "images/task_044_episode_16_135_cam_high.jpg", + "images/task_044_episode_16_150_cam_high.jpg", + "images/task_044_episode_16_165_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_16", + "recording_id": "task_044_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Down\nC. Right\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Right" + } + ], + "images": [ + "images/task_044_episode_16_126_cam_high.jpg", + "images/task_044_episode_16_141_cam_high.jpg", + "images/task_044_episode_16_156_cam_high.jpg", + "images/task_044_episode_16_171_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_16", + "recording_id": "task_044_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Down\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Right" + } + ], + "images": [ + "images/task_044_episode_16_198_cam_high.jpg", + "images/task_044_episode_16_213_cam_high.jpg", + "images/task_044_episode_16_228_cam_high.jpg", + "images/task_044_episode_16_243_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_16", + "recording_id": "task_044_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Left\nC. Down\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Right" + } + ], + "images": [ + "images/task_044_episode_16_204_cam_high.jpg", + "images/task_044_episode_16_219_cam_high.jpg", + "images/task_044_episode_16_234_cam_high.jpg", + "images/task_044_episode_16_249_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_16", + "recording_id": "task_044_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Left\nC. Up\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Right" + } + ], + "images": [ + "images/task_044_episode_16_210_cam_high.jpg", + "images/task_044_episode_16_225_cam_high.jpg", + "images/task_044_episode_16_240_cam_high.jpg", + "images/task_044_episode_16_255_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_16", + "recording_id": "task_044_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_044_episode_16_427_cam_high.jpg", + "images/task_044_episode_16_442_cam_high.jpg", + "images/task_044_episode_16_457_cam_high.jpg", + "images/task_044_episode_16_472_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_16", + "recording_id": "task_044_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_044_episode_16_346_cam_high.jpg", + "images/task_044_episode_16_361_cam_high.jpg", + "images/task_044_episode_16_376_cam_high.jpg", + "images/task_044_episode_16_391_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_16", + "recording_id": "task_044_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Only the right arm is actively moving" + } + ], + "images": [ + "images/task_044_episode_16_383_cam_high.jpg", + "images/task_044_episode_16_398_cam_high.jpg", + "images/task_044_episode_16_413_cam_high.jpg", + "images/task_044_episode_16_428_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_16", + "recording_id": "task_044_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_044_episode_16_365_cam_high.jpg", + "images/task_044_episode_16_380_cam_high.jpg", + "images/task_044_episode_16_395_cam_high.jpg", + "images/task_044_episode_16_410_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_16", + "recording_id": "task_044_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Both arms are actively moving" + } + ], + "images": [ + "images/task_044_episode_16_148_cam_high.jpg", + "images/task_044_episode_16_163_cam_high.jpg", + "images/task_044_episode_16_178_cam_high.jpg", + "images/task_044_episode_16_193_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_16", + "recording_id": "task_044_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Only the right arm is actively moving" + } + ], + "images": [ + "images/task_044_episode_16_377_cam_high.jpg", + "images/task_044_episode_16_392_cam_high.jpg", + "images/task_044_episode_16_407_cam_high.jpg", + "images/task_044_episode_16_422_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_16", + "recording_id": "task_044_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Both arms are actively moving" + } + ], + "images": [ + "images/task_044_episode_16_124_cam_high.jpg", + "images/task_044_episode_16_139_cam_high.jpg", + "images/task_044_episode_16_154_cam_high.jpg", + "images/task_044_episode_16_169_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_16", + "recording_id": "task_044_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Only the left arm is actively moving" + } + ], + "images": [ + "images/task_044_episode_16_62_cam_high.jpg", + "images/task_044_episode_16_77_cam_high.jpg", + "images/task_044_episode_16_92_cam_high.jpg", + "images/task_044_episode_16_107_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_16", + "recording_id": "task_044_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Only the right arm is actively moving" + } + ], + "images": [ + "images/task_044_episode_16_167_cam_high.jpg", + "images/task_044_episode_16_182_cam_high.jpg", + "images/task_044_episode_16_197_cam_high.jpg", + "images/task_044_episode_16_212_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_16", + "recording_id": "task_044_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Only the left arm is actively moving" + } + ], + "images": [ + "images/task_044_episode_16_111_cam_high.jpg", + "images/task_044_episode_16_126_cam_high.jpg", + "images/task_044_episode_16_141_cam_high.jpg", + "images/task_044_episode_16_156_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_16", + "recording_id": "task_044_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Only the left arm is actively moving" + } + ], + "images": [ + "images/task_044_episode_16_43_cam_high.jpg", + "images/task_044_episode_16_58_cam_high.jpg", + "images/task_044_episode_16_73_cam_high.jpg", + "images/task_044_episode_16_88_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_16", + "recording_id": "task_044_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_044_episode_16_372_cam_high.jpg", + "images/task_044_episode_16_375_cam_high.jpg", + "images/task_044_episode_16_378_cam_high.jpg", + "images/task_044_episode_16_381_cam_high.jpg", + "images/task_044_episode_16_384_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_16", + "recording_id": "task_044_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_044_episode_16_12_cam_high.jpg", + "images/task_044_episode_16_15_cam_high.jpg", + "images/task_044_episode_16_18_cam_high.jpg", + "images/task_044_episode_16_21_cam_high.jpg", + "images/task_044_episode_16_24_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_16", + "recording_id": "task_044_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_044_episode_16_252_cam_high.jpg", + "images/task_044_episode_16_255_cam_high.jpg", + "images/task_044_episode_16_258_cam_high.jpg", + "images/task_044_episode_16_261_cam_high.jpg", + "images/task_044_episode_16_264_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_16", + "recording_id": "task_044_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_044_episode_16_69_cam_high.jpg", + "images/task_044_episode_16_72_cam_high.jpg", + "images/task_044_episode_16_75_cam_high.jpg", + "images/task_044_episode_16_78_cam_high.jpg", + "images/task_044_episode_16_81_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_16", + "recording_id": "task_044_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_044_episode_16_6_cam_high.jpg", + "images/task_044_episode_16_9_cam_high.jpg", + "images/task_044_episode_16_12_cam_high.jpg", + "images/task_044_episode_16_15_cam_high.jpg", + "images/task_044_episode_16_18_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_16", + "recording_id": "task_044_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_044_episode_16_40_cam_high.jpg", + "images/task_044_episode_16_43_cam_high.jpg", + "images/task_044_episode_16_46_cam_high.jpg", + "images/task_044_episode_16_49_cam_high.jpg", + "images/task_044_episode_16_52_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_16", + "recording_id": "task_044_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_044_episode_16_338_cam_high.jpg", + "images/task_044_episode_16_341_cam_high.jpg", + "images/task_044_episode_16_344_cam_high.jpg", + "images/task_044_episode_16_347_cam_high.jpg", + "images/task_044_episode_16_350_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_16", + "recording_id": "task_044_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_044_episode_16_120_cam_high.jpg", + "images/task_044_episode_16_123_cam_high.jpg", + "images/task_044_episode_16_126_cam_high.jpg", + "images/task_044_episode_16_129_cam_high.jpg", + "images/task_044_episode_16_132_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_16", + "recording_id": "task_044_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Frame Y happens before Frame X" + } + ], + "images": [ + "images/task_044_episode_16_359_cam_high.jpg", + "images/task_044_episode_16_90_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_16", + "recording_id": "task_044_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Frame Y happens before Frame X" + } + ], + "images": [ + "images/task_044_episode_16_309_cam_high.jpg", + "images/task_044_episode_16_125_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_16", + "recording_id": "task_044_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Frame Y happens before Frame X" + } + ], + "images": [ + "images/task_044_episode_16_409_cam_high.jpg", + "images/task_044_episode_16_175_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_16", + "recording_id": "task_044_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_044_episode_16_421_cam_high.jpg", + "images/task_044_episode_16_368_cam_high.jpg", + "images/task_044_episode_16_395_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_16", + "recording_id": "task_044_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_044_episode_16_104_cam_high.jpg", + "images/task_044_episode_16_131_cam_high.jpg", + "images/task_044_episode_16_157_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_16", + "recording_id": "task_044_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_044_episode_16_210_cam_high.jpg", + "images/task_044_episode_16_157_cam_high.jpg", + "images/task_044_episode_16_183_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_16", + "recording_id": "task_044_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_044_episode_16_368_cam_high.jpg", + "images/task_044_episode_16_316_cam_high.jpg", + "images/task_044_episode_16_342_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_16", + "recording_id": "task_044_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_044_episode_16_78_cam_high.jpg", + "images/task_044_episode_16_51_cam_high.jpg", + "images/task_044_episode_16_25_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_16", + "recording_id": "task_044_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_044_episode_16_289_cam_high.jpg", + "images/task_044_episode_16_263_cam_high.jpg", + "images/task_044_episode_16_236_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_16", + "recording_id": "task_044_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_044_episode_16_368_cam_high.jpg", + "images/task_044_episode_16_342_cam_high.jpg", + "images/task_044_episode_16_395_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_16", + "recording_id": "task_044_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_044_episode_16_78_cam_high.jpg", + "images/task_044_episode_16_104_cam_high.jpg", + "images/task_044_episode_16_51_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_16", + "recording_id": "task_044_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_044_episode_16_183_cam_high.jpg", + "images/task_044_episode_16_210_cam_high.jpg", + "images/task_044_episode_16_236_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_16", + "recording_id": "task_044_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_044_episode_16_421_cam_high.jpg", + "images/task_044_episode_16_474_cam_high.jpg", + "images/task_044_episode_16_448_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_16", + "recording_id": "task_044_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_044_episode_16_78_cam_high.jpg", + "images/task_044_episode_16_131_cam_high.jpg", + "images/task_044_episode_16_104_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_16", + "recording_id": "task_044_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_044_episode_16_316_cam_high.jpg", + "images/task_044_episode_16_289_cam_high.jpg", + "images/task_044_episode_16_263_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_16", + "recording_id": "task_044_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_044_episode_16_448_cam_high.jpg", + "images/task_044_episode_16_395_cam_high.jpg", + "images/task_044_episode_16_421_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_16", + "recording_id": "task_044_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_044_episode_16_157_cam_high.jpg", + "images/task_044_episode_16_131_cam_high.jpg", + "images/task_044_episode_16_183_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_16", + "recording_id": "task_044_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Left" + } + ], + "images": [ + "images/task_044_episode_26_0_cam_high.jpg", + "images/task_044_episode_26_15_cam_high.jpg", + "images/task_044_episode_26_30_cam_high.jpg", + "images/task_044_episode_26_45_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_26", + "recording_id": "task_044_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Left" + } + ], + "images": [ + "images/task_044_episode_26_6_cam_high.jpg", + "images/task_044_episode_26_21_cam_high.jpg", + "images/task_044_episode_26_36_cam_high.jpg", + "images/task_044_episode_26_51_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_26", + "recording_id": "task_044_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Right\nC. Left\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Left" + } + ], + "images": [ + "images/task_044_episode_26_12_cam_high.jpg", + "images/task_044_episode_26_27_cam_high.jpg", + "images/task_044_episode_26_42_cam_high.jpg", + "images/task_044_episode_26_57_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_26", + "recording_id": "task_044_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Left\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Right" + } + ], + "images": [ + "images/task_044_episode_26_114_cam_high.jpg", + "images/task_044_episode_26_129_cam_high.jpg", + "images/task_044_episode_26_144_cam_high.jpg", + "images/task_044_episode_26_159_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_26", + "recording_id": "task_044_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Up\nC. Right\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Down" + } + ], + "images": [ + "images/task_044_episode_26_114_cam_high.jpg", + "images/task_044_episode_26_129_cam_high.jpg", + "images/task_044_episode_26_144_cam_high.jpg", + "images/task_044_episode_26_159_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_26", + "recording_id": "task_044_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Left\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Right" + } + ], + "images": [ + "images/task_044_episode_26_120_cam_high.jpg", + "images/task_044_episode_26_135_cam_high.jpg", + "images/task_044_episode_26_150_cam_high.jpg", + "images/task_044_episode_26_165_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_26", + "recording_id": "task_044_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Up\nC. Down\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Right" + } + ], + "images": [ + "images/task_044_episode_26_126_cam_high.jpg", + "images/task_044_episode_26_141_cam_high.jpg", + "images/task_044_episode_26_156_cam_high.jpg", + "images/task_044_episode_26_171_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_26", + "recording_id": "task_044_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Right\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Down" + } + ], + "images": [ + "images/task_044_episode_26_204_cam_high.jpg", + "images/task_044_episode_26_219_cam_high.jpg", + "images/task_044_episode_26_234_cam_high.jpg", + "images/task_044_episode_26_249_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_26", + "recording_id": "task_044_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Right" + } + ], + "images": [ + "images/task_044_episode_26_210_cam_high.jpg", + "images/task_044_episode_26_225_cam_high.jpg", + "images/task_044_episode_26_240_cam_high.jpg", + "images/task_044_episode_26_255_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_26", + "recording_id": "task_044_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Left\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Right" + } + ], + "images": [ + "images/task_044_episode_26_216_cam_high.jpg", + "images/task_044_episode_26_231_cam_high.jpg", + "images/task_044_episode_26_246_cam_high.jpg", + "images/task_044_episode_26_261_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_26", + "recording_id": "task_044_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Left\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Right" + } + ], + "images": [ + "images/task_044_episode_26_222_cam_high.jpg", + "images/task_044_episode_26_237_cam_high.jpg", + "images/task_044_episode_26_252_cam_high.jpg", + "images/task_044_episode_26_267_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_26", + "recording_id": "task_044_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_044_episode_26_346_cam_high.jpg", + "images/task_044_episode_26_361_cam_high.jpg", + "images/task_044_episode_26_376_cam_high.jpg", + "images/task_044_episode_26_391_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_26", + "recording_id": "task_044_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_044_episode_26_278_cam_high.jpg", + "images/task_044_episode_26_293_cam_high.jpg", + "images/task_044_episode_26_308_cam_high.jpg", + "images/task_044_episode_26_323_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_26", + "recording_id": "task_044_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_044_episode_26_315_cam_high.jpg", + "images/task_044_episode_26_330_cam_high.jpg", + "images/task_044_episode_26_345_cam_high.jpg", + "images/task_044_episode_26_360_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_26", + "recording_id": "task_044_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Only the right arm is actively moving" + } + ], + "images": [ + "images/task_044_episode_26_210_cam_high.jpg", + "images/task_044_episode_26_225_cam_high.jpg", + "images/task_044_episode_26_240_cam_high.jpg", + "images/task_044_episode_26_255_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_26", + "recording_id": "task_044_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Only the right arm is actively moving" + } + ], + "images": [ + "images/task_044_episode_26_254_cam_high.jpg", + "images/task_044_episode_26_269_cam_high.jpg", + "images/task_044_episode_26_284_cam_high.jpg", + "images/task_044_episode_26_299_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_26", + "recording_id": "task_044_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Only the right arm is actively moving" + } + ], + "images": [ + "images/task_044_episode_26_235_cam_high.jpg", + "images/task_044_episode_26_250_cam_high.jpg", + "images/task_044_episode_26_265_cam_high.jpg", + "images/task_044_episode_26_280_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_26", + "recording_id": "task_044_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Both arms are actively moving" + } + ], + "images": [ + "images/task_044_episode_26_31_cam_high.jpg", + "images/task_044_episode_26_46_cam_high.jpg", + "images/task_044_episode_26_61_cam_high.jpg", + "images/task_044_episode_26_76_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_26", + "recording_id": "task_044_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Both arms are actively moving" + } + ], + "images": [ + "images/task_044_episode_26_124_cam_high.jpg", + "images/task_044_episode_26_139_cam_high.jpg", + "images/task_044_episode_26_154_cam_high.jpg", + "images/task_044_episode_26_169_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_26", + "recording_id": "task_044_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Both arms are actively moving" + } + ], + "images": [ + "images/task_044_episode_26_37_cam_high.jpg", + "images/task_044_episode_26_52_cam_high.jpg", + "images/task_044_episode_26_67_cam_high.jpg", + "images/task_044_episode_26_82_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_26", + "recording_id": "task_044_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_044_episode_26_258_cam_high.jpg", + "images/task_044_episode_26_261_cam_high.jpg", + "images/task_044_episode_26_264_cam_high.jpg", + "images/task_044_episode_26_267_cam_high.jpg", + "images/task_044_episode_26_270_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_26", + "recording_id": "task_044_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_044_episode_26_458_cam_high.jpg", + "images/task_044_episode_26_461_cam_high.jpg", + "images/task_044_episode_26_464_cam_high.jpg", + "images/task_044_episode_26_467_cam_high.jpg", + "images/task_044_episode_26_470_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_26", + "recording_id": "task_044_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_044_episode_26_75_cam_high.jpg", + "images/task_044_episode_26_78_cam_high.jpg", + "images/task_044_episode_26_81_cam_high.jpg", + "images/task_044_episode_26_84_cam_high.jpg", + "images/task_044_episode_26_87_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_26", + "recording_id": "task_044_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_044_episode_26_452_cam_high.jpg", + "images/task_044_episode_26_455_cam_high.jpg", + "images/task_044_episode_26_458_cam_high.jpg", + "images/task_044_episode_26_461_cam_high.jpg", + "images/task_044_episode_26_464_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_26", + "recording_id": "task_044_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_044_episode_26_252_cam_high.jpg", + "images/task_044_episode_26_255_cam_high.jpg", + "images/task_044_episode_26_258_cam_high.jpg", + "images/task_044_episode_26_261_cam_high.jpg", + "images/task_044_episode_26_264_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_26", + "recording_id": "task_044_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_044_episode_26_458_cam_high.jpg", + "images/task_044_episode_26_461_cam_high.jpg", + "images/task_044_episode_26_464_cam_high.jpg", + "images/task_044_episode_26_467_cam_high.jpg", + "images/task_044_episode_26_470_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_26", + "recording_id": "task_044_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_044_episode_26_58_cam_high.jpg", + "images/task_044_episode_26_61_cam_high.jpg", + "images/task_044_episode_26_64_cam_high.jpg", + "images/task_044_episode_26_67_cam_high.jpg", + "images/task_044_episode_26_70_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_26", + "recording_id": "task_044_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_044_episode_26_298_cam_high.jpg", + "images/task_044_episode_26_301_cam_high.jpg", + "images/task_044_episode_26_304_cam_high.jpg", + "images/task_044_episode_26_307_cam_high.jpg", + "images/task_044_episode_26_310_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_26", + "recording_id": "task_044_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Frame Y happens before Frame X" + } + ], + "images": [ + "images/task_044_episode_26_359_cam_high.jpg", + "images/task_044_episode_26_90_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_26", + "recording_id": "task_044_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Frame Y happens before Frame X" + } + ], + "images": [ + "images/task_044_episode_26_309_cam_high.jpg", + "images/task_044_episode_26_125_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_26", + "recording_id": "task_044_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Frame X happens before Frame Y" + } + ], + "images": [ + "images/task_044_episode_26_175_cam_high.jpg", + "images/task_044_episode_26_409_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_26", + "recording_id": "task_044_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_044_episode_26_104_cam_high.jpg", + "images/task_044_episode_26_157_cam_high.jpg", + "images/task_044_episode_26_131_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_26", + "recording_id": "task_044_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_044_episode_26_395_cam_high.jpg", + "images/task_044_episode_26_368_cam_high.jpg", + "images/task_044_episode_26_421_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_26", + "recording_id": "task_044_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_044_episode_26_51_cam_high.jpg", + "images/task_044_episode_26_78_cam_high.jpg", + "images/task_044_episode_26_25_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_26", + "recording_id": "task_044_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_044_episode_26_236_cam_high.jpg", + "images/task_044_episode_26_289_cam_high.jpg", + "images/task_044_episode_26_263_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_26", + "recording_id": "task_044_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_044_episode_26_131_cam_high.jpg", + "images/task_044_episode_26_104_cam_high.jpg", + "images/task_044_episode_26_78_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_26", + "recording_id": "task_044_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_044_episode_26_316_cam_high.jpg", + "images/task_044_episode_26_263_cam_high.jpg", + "images/task_044_episode_26_289_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_26", + "recording_id": "task_044_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_044_episode_26_183_cam_high.jpg", + "images/task_044_episode_26_210_cam_high.jpg", + "images/task_044_episode_26_157_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_044", + "split": "eval", + "split_group_id": "task_044_episode_26", + "recording_id": "task_044_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Right\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Right" + } + ], + "images": [ + "images/task_066_episode_13_0_cam_high.jpg", + "images/task_066_episode_13_15_cam_high.jpg", + "images/task_066_episode_13_30_cam_high.jpg", + "images/task_066_episode_13_45_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_13", + "recording_id": "task_066_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Up\nC. Right\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Right" + } + ], + "images": [ + "images/task_066_episode_13_6_cam_high.jpg", + "images/task_066_episode_13_21_cam_high.jpg", + "images/task_066_episode_13_36_cam_high.jpg", + "images/task_066_episode_13_51_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_13", + "recording_id": "task_066_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Left\nC. Right\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Right" + } + ], + "images": [ + "images/task_066_episode_13_12_cam_high.jpg", + "images/task_066_episode_13_27_cam_high.jpg", + "images/task_066_episode_13_42_cam_high.jpg", + "images/task_066_episode_13_57_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_13", + "recording_id": "task_066_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Right" + } + ], + "images": [ + "images/task_066_episode_13_126_cam_high.jpg", + "images/task_066_episode_13_141_cam_high.jpg", + "images/task_066_episode_13_156_cam_high.jpg", + "images/task_066_episode_13_171_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_13", + "recording_id": "task_066_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Left\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Right" + } + ], + "images": [ + "images/task_066_episode_13_132_cam_high.jpg", + "images/task_066_episode_13_147_cam_high.jpg", + "images/task_066_episode_13_162_cam_high.jpg", + "images/task_066_episode_13_177_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_13", + "recording_id": "task_066_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Right\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Right" + } + ], + "images": [ + "images/task_066_episode_13_138_cam_high.jpg", + "images/task_066_episode_13_153_cam_high.jpg", + "images/task_066_episode_13_168_cam_high.jpg", + "images/task_066_episode_13_183_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_13", + "recording_id": "task_066_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Down\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Down" + } + ], + "images": [ + "images/task_066_episode_13_438_cam_high.jpg", + "images/task_066_episode_13_453_cam_high.jpg", + "images/task_066_episode_13_468_cam_high.jpg", + "images/task_066_episode_13_483_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_13", + "recording_id": "task_066_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_066_episode_13_216_cam_high.jpg", + "images/task_066_episode_13_231_cam_high.jpg", + "images/task_066_episode_13_246_cam_high.jpg", + "images/task_066_episode_13_261_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_13", + "recording_id": "task_066_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Both arms are actively moving" + } + ], + "images": [ + "images/task_066_episode_13_6_cam_high.jpg", + "images/task_066_episode_13_21_cam_high.jpg", + "images/task_066_episode_13_36_cam_high.jpg", + "images/task_066_episode_13_51_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_13", + "recording_id": "task_066_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_066_episode_13_99_cam_high.jpg", + "images/task_066_episode_13_114_cam_high.jpg", + "images/task_066_episode_13_129_cam_high.jpg", + "images/task_066_episode_13_144_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_13", + "recording_id": "task_066_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Only the left arm is actively moving" + } + ], + "images": [ + "images/task_066_episode_13_352_cam_high.jpg", + "images/task_066_episode_13_367_cam_high.jpg", + "images/task_066_episode_13_382_cam_high.jpg", + "images/task_066_episode_13_397_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_13", + "recording_id": "task_066_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_066_episode_13_161_cam_high.jpg", + "images/task_066_episode_13_176_cam_high.jpg", + "images/task_066_episode_13_191_cam_high.jpg", + "images/task_066_episode_13_206_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_13", + "recording_id": "task_066_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Both arms are actively moving" + } + ], + "images": [ + "images/task_066_episode_13_56_cam_high.jpg", + "images/task_066_episode_13_71_cam_high.jpg", + "images/task_066_episode_13_86_cam_high.jpg", + "images/task_066_episode_13_101_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_13", + "recording_id": "task_066_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Both arms are actively moving" + } + ], + "images": [ + "images/task_066_episode_13_31_cam_high.jpg", + "images/task_066_episode_13_46_cam_high.jpg", + "images/task_066_episode_13_61_cam_high.jpg", + "images/task_066_episode_13_76_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_13", + "recording_id": "task_066_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Only the left arm is actively moving" + } + ], + "images": [ + "images/task_066_episode_13_371_cam_high.jpg", + "images/task_066_episode_13_386_cam_high.jpg", + "images/task_066_episode_13_401_cam_high.jpg", + "images/task_066_episode_13_416_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_13", + "recording_id": "task_066_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Only the left arm is actively moving" + } + ], + "images": [ + "images/task_066_episode_13_173_cam_high.jpg", + "images/task_066_episode_13_188_cam_high.jpg", + "images/task_066_episode_13_203_cam_high.jpg", + "images/task_066_episode_13_218_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_13", + "recording_id": "task_066_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_066_episode_13_447_cam_high.jpg", + "images/task_066_episode_13_450_cam_high.jpg", + "images/task_066_episode_13_453_cam_high.jpg", + "images/task_066_episode_13_456_cam_high.jpg", + "images/task_066_episode_13_459_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_13", + "recording_id": "task_066_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_066_episode_13_138_cam_high.jpg", + "images/task_066_episode_13_141_cam_high.jpg", + "images/task_066_episode_13_144_cam_high.jpg", + "images/task_066_episode_13_147_cam_high.jpg", + "images/task_066_episode_13_150_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_13", + "recording_id": "task_066_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_066_episode_13_395_cam_high.jpg", + "images/task_066_episode_13_398_cam_high.jpg", + "images/task_066_episode_13_401_cam_high.jpg", + "images/task_066_episode_13_404_cam_high.jpg", + "images/task_066_episode_13_407_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_13", + "recording_id": "task_066_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_066_episode_13_29_cam_high.jpg", + "images/task_066_episode_13_32_cam_high.jpg", + "images/task_066_episode_13_35_cam_high.jpg", + "images/task_066_episode_13_38_cam_high.jpg", + "images/task_066_episode_13_41_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_13", + "recording_id": "task_066_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_066_episode_13_258_cam_high.jpg", + "images/task_066_episode_13_261_cam_high.jpg", + "images/task_066_episode_13_264_cam_high.jpg", + "images/task_066_episode_13_267_cam_high.jpg", + "images/task_066_episode_13_270_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_13", + "recording_id": "task_066_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_066_episode_13_103_cam_high.jpg", + "images/task_066_episode_13_106_cam_high.jpg", + "images/task_066_episode_13_109_cam_high.jpg", + "images/task_066_episode_13_112_cam_high.jpg", + "images/task_066_episode_13_115_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_13", + "recording_id": "task_066_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_066_episode_13_23_cam_high.jpg", + "images/task_066_episode_13_26_cam_high.jpg", + "images/task_066_episode_13_29_cam_high.jpg", + "images/task_066_episode_13_32_cam_high.jpg", + "images/task_066_episode_13_35_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_13", + "recording_id": "task_066_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_066_episode_13_35_cam_high.jpg", + "images/task_066_episode_13_38_cam_high.jpg", + "images/task_066_episode_13_41_cam_high.jpg", + "images/task_066_episode_13_44_cam_high.jpg", + "images/task_066_episode_13_47_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_13", + "recording_id": "task_066_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Frame X happens before Frame Y" + } + ], + "images": [ + "images/task_066_episode_13_90_cam_high.jpg", + "images/task_066_episode_13_359_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_13", + "recording_id": "task_066_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Frame Y happens before Frame X" + } + ], + "images": [ + "images/task_066_episode_13_309_cam_high.jpg", + "images/task_066_episode_13_125_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_13", + "recording_id": "task_066_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Frame X happens before Frame Y" + } + ], + "images": [ + "images/task_066_episode_13_175_cam_high.jpg", + "images/task_066_episode_13_409_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_13", + "recording_id": "task_066_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_066_episode_13_263_cam_high.jpg", + "images/task_066_episode_13_236_cam_high.jpg", + "images/task_066_episode_13_289_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_13", + "recording_id": "task_066_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_066_episode_13_157_cam_high.jpg", + "images/task_066_episode_13_183_cam_high.jpg", + "images/task_066_episode_13_131_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_13", + "recording_id": "task_066_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_066_episode_13_104_cam_high.jpg", + "images/task_066_episode_13_131_cam_high.jpg", + "images/task_066_episode_13_78_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_13", + "recording_id": "task_066_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_066_episode_13_421_cam_high.jpg", + "images/task_066_episode_13_474_cam_high.jpg", + "images/task_066_episode_13_448_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_13", + "recording_id": "task_066_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_066_episode_13_342_cam_high.jpg", + "images/task_066_episode_13_368_cam_high.jpg", + "images/task_066_episode_13_316_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_13", + "recording_id": "task_066_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_066_episode_13_263_cam_high.jpg", + "images/task_066_episode_13_316_cam_high.jpg", + "images/task_066_episode_13_289_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_13", + "recording_id": "task_066_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_066_episode_13_157_cam_high.jpg", + "images/task_066_episode_13_210_cam_high.jpg", + "images/task_066_episode_13_183_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_13", + "recording_id": "task_066_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_066_episode_13_157_cam_high.jpg", + "images/task_066_episode_13_104_cam_high.jpg", + "images/task_066_episode_13_131_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_13", + "recording_id": "task_066_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Left\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Right" + } + ], + "images": [ + "images/task_066_episode_21_0_cam_high.jpg", + "images/task_066_episode_21_15_cam_high.jpg", + "images/task_066_episode_21_30_cam_high.jpg", + "images/task_066_episode_21_45_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_21", + "recording_id": "task_066_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Left\nC. Right\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Right" + } + ], + "images": [ + "images/task_066_episode_21_6_cam_high.jpg", + "images/task_066_episode_21_21_cam_high.jpg", + "images/task_066_episode_21_36_cam_high.jpg", + "images/task_066_episode_21_51_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_21", + "recording_id": "task_066_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Down\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Right" + } + ], + "images": [ + "images/task_066_episode_21_12_cam_high.jpg", + "images/task_066_episode_21_27_cam_high.jpg", + "images/task_066_episode_21_42_cam_high.jpg", + "images/task_066_episode_21_57_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_21", + "recording_id": "task_066_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Down\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Right" + } + ], + "images": [ + "images/task_066_episode_21_108_cam_high.jpg", + "images/task_066_episode_21_123_cam_high.jpg", + "images/task_066_episode_21_138_cam_high.jpg", + "images/task_066_episode_21_153_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_21", + "recording_id": "task_066_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Left\nC. Right\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Right" + } + ], + "images": [ + "images/task_066_episode_21_114_cam_high.jpg", + "images/task_066_episode_21_129_cam_high.jpg", + "images/task_066_episode_21_144_cam_high.jpg", + "images/task_066_episode_21_159_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_21", + "recording_id": "task_066_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Down\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Right" + } + ], + "images": [ + "images/task_066_episode_21_120_cam_high.jpg", + "images/task_066_episode_21_135_cam_high.jpg", + "images/task_066_episode_21_150_cam_high.jpg", + "images/task_066_episode_21_165_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_21", + "recording_id": "task_066_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_066_episode_21_328_cam_high.jpg", + "images/task_066_episode_21_343_cam_high.jpg", + "images/task_066_episode_21_358_cam_high.jpg", + "images/task_066_episode_21_373_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_21", + "recording_id": "task_066_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Both arms are actively moving" + } + ], + "images": [ + "images/task_066_episode_21_6_cam_high.jpg", + "images/task_066_episode_21_21_cam_high.jpg", + "images/task_066_episode_21_36_cam_high.jpg", + "images/task_066_episode_21_51_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_21", + "recording_id": "task_066_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_066_episode_21_93_cam_high.jpg", + "images/task_066_episode_21_108_cam_high.jpg", + "images/task_066_episode_21_123_cam_high.jpg", + "images/task_066_episode_21_138_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_21", + "recording_id": "task_066_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Only the left arm is actively moving" + } + ], + "images": [ + "images/task_066_episode_21_272_cam_high.jpg", + "images/task_066_episode_21_287_cam_high.jpg", + "images/task_066_episode_21_302_cam_high.jpg", + "images/task_066_episode_21_317_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_21", + "recording_id": "task_066_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_066_episode_21_99_cam_high.jpg", + "images/task_066_episode_21_114_cam_high.jpg", + "images/task_066_episode_21_129_cam_high.jpg", + "images/task_066_episode_21_144_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_21", + "recording_id": "task_066_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Only the left arm is actively moving" + } + ], + "images": [ + "images/task_066_episode_21_254_cam_high.jpg", + "images/task_066_episode_21_269_cam_high.jpg", + "images/task_066_episode_21_284_cam_high.jpg", + "images/task_066_episode_21_299_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_21", + "recording_id": "task_066_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Both arms are actively moving" + } + ], + "images": [ + "images/task_066_episode_21_49_cam_high.jpg", + "images/task_066_episode_21_64_cam_high.jpg", + "images/task_066_episode_21_79_cam_high.jpg", + "images/task_066_episode_21_94_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_21", + "recording_id": "task_066_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Only the right arm is actively moving" + } + ], + "images": [ + "images/task_066_episode_21_74_cam_high.jpg", + "images/task_066_episode_21_89_cam_high.jpg", + "images/task_066_episode_21_104_cam_high.jpg", + "images/task_066_episode_21_119_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_21", + "recording_id": "task_066_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Both arms are actively moving" + } + ], + "images": [ + "images/task_066_episode_21_0_cam_high.jpg", + "images/task_066_episode_21_15_cam_high.jpg", + "images/task_066_episode_21_30_cam_high.jpg", + "images/task_066_episode_21_45_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_21", + "recording_id": "task_066_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_066_episode_21_195_cam_high.jpg", + "images/task_066_episode_21_198_cam_high.jpg", + "images/task_066_episode_21_201_cam_high.jpg", + "images/task_066_episode_21_204_cam_high.jpg", + "images/task_066_episode_21_207_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_21", + "recording_id": "task_066_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_066_episode_21_269_cam_high.jpg", + "images/task_066_episode_21_272_cam_high.jpg", + "images/task_066_episode_21_275_cam_high.jpg", + "images/task_066_episode_21_278_cam_high.jpg", + "images/task_066_episode_21_281_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_21", + "recording_id": "task_066_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_066_episode_21_475_cam_high.jpg", + "images/task_066_episode_21_478_cam_high.jpg", + "images/task_066_episode_21_481_cam_high.jpg", + "images/task_066_episode_21_484_cam_high.jpg", + "images/task_066_episode_21_487_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_21", + "recording_id": "task_066_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_066_episode_21_424_cam_high.jpg", + "images/task_066_episode_21_427_cam_high.jpg", + "images/task_066_episode_21_430_cam_high.jpg", + "images/task_066_episode_21_433_cam_high.jpg", + "images/task_066_episode_21_436_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_21", + "recording_id": "task_066_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_066_episode_21_384_cam_high.jpg", + "images/task_066_episode_21_387_cam_high.jpg", + "images/task_066_episode_21_390_cam_high.jpg", + "images/task_066_episode_21_393_cam_high.jpg", + "images/task_066_episode_21_396_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_21", + "recording_id": "task_066_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_066_episode_21_40_cam_high.jpg", + "images/task_066_episode_21_43_cam_high.jpg", + "images/task_066_episode_21_46_cam_high.jpg", + "images/task_066_episode_21_49_cam_high.jpg", + "images/task_066_episode_21_52_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_21", + "recording_id": "task_066_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_066_episode_21_178_cam_high.jpg", + "images/task_066_episode_21_181_cam_high.jpg", + "images/task_066_episode_21_184_cam_high.jpg", + "images/task_066_episode_21_187_cam_high.jpg", + "images/task_066_episode_21_190_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_21", + "recording_id": "task_066_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_066_episode_21_75_cam_high.jpg", + "images/task_066_episode_21_78_cam_high.jpg", + "images/task_066_episode_21_81_cam_high.jpg", + "images/task_066_episode_21_84_cam_high.jpg", + "images/task_066_episode_21_87_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_21", + "recording_id": "task_066_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Frame Y happens before Frame X" + } + ], + "images": [ + "images/task_066_episode_21_359_cam_high.jpg", + "images/task_066_episode_21_90_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_21", + "recording_id": "task_066_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Frame X happens before Frame Y" + } + ], + "images": [ + "images/task_066_episode_21_125_cam_high.jpg", + "images/task_066_episode_21_309_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_21", + "recording_id": "task_066_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Frame Y happens before Frame X" + } + ], + "images": [ + "images/task_066_episode_21_409_cam_high.jpg", + "images/task_066_episode_21_175_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_21", + "recording_id": "task_066_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_066_episode_21_78_cam_high.jpg", + "images/task_066_episode_21_51_cam_high.jpg", + "images/task_066_episode_21_104_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_21", + "recording_id": "task_066_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_066_episode_21_395_cam_high.jpg", + "images/task_066_episode_21_368_cam_high.jpg", + "images/task_066_episode_21_421_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_21", + "recording_id": "task_066_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_066_episode_21_289_cam_high.jpg", + "images/task_066_episode_21_342_cam_high.jpg", + "images/task_066_episode_21_316_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_21", + "recording_id": "task_066_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_066_episode_21_289_cam_high.jpg", + "images/task_066_episode_21_263_cam_high.jpg", + "images/task_066_episode_21_236_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_21", + "recording_id": "task_066_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_066_episode_21_131_cam_high.jpg", + "images/task_066_episode_21_78_cam_high.jpg", + "images/task_066_episode_21_104_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_21", + "recording_id": "task_066_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_066_episode_21_210_cam_high.jpg", + "images/task_066_episode_21_236_cam_high.jpg", + "images/task_066_episode_21_183_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_21", + "recording_id": "task_066_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_066_episode_21_157_cam_high.jpg", + "images/task_066_episode_21_183_cam_high.jpg", + "images/task_066_episode_21_131_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_21", + "recording_id": "task_066_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_066_episode_21_421_cam_high.jpg", + "images/task_066_episode_21_474_cam_high.jpg", + "images/task_066_episode_21_448_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_21", + "recording_id": "task_066_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_066_episode_21_131_cam_high.jpg", + "images/task_066_episode_21_157_cam_high.jpg", + "images/task_066_episode_21_104_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_21", + "recording_id": "task_066_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Right\nC. Left\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Right" + } + ], + "images": [ + "images/task_066_episode_39_0_cam_high.jpg", + "images/task_066_episode_39_15_cam_high.jpg", + "images/task_066_episode_39_30_cam_high.jpg", + "images/task_066_episode_39_45_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_39", + "recording_id": "task_066_episode_39", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Right\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Right" + } + ], + "images": [ + "images/task_066_episode_39_6_cam_high.jpg", + "images/task_066_episode_39_21_cam_high.jpg", + "images/task_066_episode_39_36_cam_high.jpg", + "images/task_066_episode_39_51_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_39", + "recording_id": "task_066_episode_39", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Down\nC. Left\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Right" + } + ], + "images": [ + "images/task_066_episode_39_12_cam_high.jpg", + "images/task_066_episode_39_27_cam_high.jpg", + "images/task_066_episode_39_42_cam_high.jpg", + "images/task_066_episode_39_57_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_39", + "recording_id": "task_066_episode_39", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Down\nC. Right\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Right" + } + ], + "images": [ + "images/task_066_episode_39_90_cam_high.jpg", + "images/task_066_episode_39_105_cam_high.jpg", + "images/task_066_episode_39_120_cam_high.jpg", + "images/task_066_episode_39_135_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_39", + "recording_id": "task_066_episode_39", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Right" + } + ], + "images": [ + "images/task_066_episode_39_96_cam_high.jpg", + "images/task_066_episode_39_111_cam_high.jpg", + "images/task_066_episode_39_126_cam_high.jpg", + "images/task_066_episode_39_141_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_39", + "recording_id": "task_066_episode_39", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Left\nC. Right\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Right" + } + ], + "images": [ + "images/task_066_episode_39_102_cam_high.jpg", + "images/task_066_episode_39_117_cam_high.jpg", + "images/task_066_episode_39_132_cam_high.jpg", + "images/task_066_episode_39_147_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_39", + "recording_id": "task_066_episode_39", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Down" + } + ], + "images": [ + "images/task_066_episode_39_138_cam_high.jpg", + "images/task_066_episode_39_153_cam_high.jpg", + "images/task_066_episode_39_168_cam_high.jpg", + "images/task_066_episode_39_183_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_39", + "recording_id": "task_066_episode_39", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_066_episode_39_408_cam_high.jpg", + "images/task_066_episode_39_423_cam_high.jpg", + "images/task_066_episode_39_438_cam_high.jpg", + "images/task_066_episode_39_453_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_39", + "recording_id": "task_066_episode_39", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Only the left arm is actively moving" + } + ], + "images": [ + "images/task_066_episode_39_284_cam_high.jpg", + "images/task_066_episode_39_299_cam_high.jpg", + "images/task_066_episode_39_314_cam_high.jpg", + "images/task_066_episode_39_329_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_39", + "recording_id": "task_066_episode_39", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_066_episode_39_396_cam_high.jpg", + "images/task_066_episode_39_411_cam_high.jpg", + "images/task_066_episode_39_426_cam_high.jpg", + "images/task_066_episode_39_441_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_39", + "recording_id": "task_066_episode_39", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_066_episode_39_427_cam_high.jpg", + "images/task_066_episode_39_442_cam_high.jpg", + "images/task_066_episode_39_457_cam_high.jpg", + "images/task_066_episode_39_472_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_39", + "recording_id": "task_066_episode_39", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Only the left arm is actively moving" + } + ], + "images": [ + "images/task_066_episode_39_142_cam_high.jpg", + "images/task_066_episode_39_157_cam_high.jpg", + "images/task_066_episode_39_172_cam_high.jpg", + "images/task_066_episode_39_187_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_39", + "recording_id": "task_066_episode_39", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Only the left arm is actively moving" + } + ], + "images": [ + "images/task_066_episode_39_136_cam_high.jpg", + "images/task_066_episode_39_151_cam_high.jpg", + "images/task_066_episode_39_166_cam_high.jpg", + "images/task_066_episode_39_181_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_39", + "recording_id": "task_066_episode_39", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Both arms are actively moving" + } + ], + "images": [ + "images/task_066_episode_39_0_cam_high.jpg", + "images/task_066_episode_39_15_cam_high.jpg", + "images/task_066_episode_39_30_cam_high.jpg", + "images/task_066_episode_39_45_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_39", + "recording_id": "task_066_episode_39", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Both arms are actively moving" + } + ], + "images": [ + "images/task_066_episode_39_37_cam_high.jpg", + "images/task_066_episode_39_52_cam_high.jpg", + "images/task_066_episode_39_67_cam_high.jpg", + "images/task_066_episode_39_82_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_39", + "recording_id": "task_066_episode_39", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_066_episode_39_58_cam_high.jpg", + "images/task_066_episode_39_61_cam_high.jpg", + "images/task_066_episode_39_64_cam_high.jpg", + "images/task_066_episode_39_67_cam_high.jpg", + "images/task_066_episode_39_70_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_39", + "recording_id": "task_066_episode_39", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_066_episode_39_35_cam_high.jpg", + "images/task_066_episode_39_38_cam_high.jpg", + "images/task_066_episode_39_41_cam_high.jpg", + "images/task_066_episode_39_44_cam_high.jpg", + "images/task_066_episode_39_47_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_39", + "recording_id": "task_066_episode_39", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_066_episode_39_412_cam_high.jpg", + "images/task_066_episode_39_415_cam_high.jpg", + "images/task_066_episode_39_418_cam_high.jpg", + "images/task_066_episode_39_421_cam_high.jpg", + "images/task_066_episode_39_424_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_39", + "recording_id": "task_066_episode_39", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_066_episode_39_206_cam_high.jpg", + "images/task_066_episode_39_209_cam_high.jpg", + "images/task_066_episode_39_212_cam_high.jpg", + "images/task_066_episode_39_215_cam_high.jpg", + "images/task_066_episode_39_218_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_39", + "recording_id": "task_066_episode_39", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_066_episode_39_447_cam_high.jpg", + "images/task_066_episode_39_450_cam_high.jpg", + "images/task_066_episode_39_453_cam_high.jpg", + "images/task_066_episode_39_456_cam_high.jpg", + "images/task_066_episode_39_459_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_39", + "recording_id": "task_066_episode_39", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_066_episode_39_264_cam_high.jpg", + "images/task_066_episode_39_267_cam_high.jpg", + "images/task_066_episode_39_270_cam_high.jpg", + "images/task_066_episode_39_273_cam_high.jpg", + "images/task_066_episode_39_276_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_39", + "recording_id": "task_066_episode_39", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_066_episode_39_52_cam_high.jpg", + "images/task_066_episode_39_55_cam_high.jpg", + "images/task_066_episode_39_58_cam_high.jpg", + "images/task_066_episode_39_61_cam_high.jpg", + "images/task_066_episode_39_64_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_39", + "recording_id": "task_066_episode_39", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_066_episode_39_40_cam_high.jpg", + "images/task_066_episode_39_43_cam_high.jpg", + "images/task_066_episode_39_46_cam_high.jpg", + "images/task_066_episode_39_49_cam_high.jpg", + "images/task_066_episode_39_52_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_39", + "recording_id": "task_066_episode_39", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Frame Y happens before Frame X" + } + ], + "images": [ + "images/task_066_episode_39_359_cam_high.jpg", + "images/task_066_episode_39_90_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_39", + "recording_id": "task_066_episode_39", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Frame X happens before Frame Y" + } + ], + "images": [ + "images/task_066_episode_39_125_cam_high.jpg", + "images/task_066_episode_39_309_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_39", + "recording_id": "task_066_episode_39", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Frame X happens before Frame Y" + } + ], + "images": [ + "images/task_066_episode_39_175_cam_high.jpg", + "images/task_066_episode_39_409_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_39", + "recording_id": "task_066_episode_39", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_066_episode_39_236_cam_high.jpg", + "images/task_066_episode_39_183_cam_high.jpg", + "images/task_066_episode_39_210_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_39", + "recording_id": "task_066_episode_39", + "episode_id": null, + "camera": "cam_high", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_066_episode_39_236_cam_high.jpg", + "images/task_066_episode_39_210_cam_high.jpg", + "images/task_066_episode_39_263_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_39", + "recording_id": "task_066_episode_39", + "episode_id": null, + "camera": "cam_high", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_066_episode_39_104_cam_high.jpg", + "images/task_066_episode_39_131_cam_high.jpg", + "images/task_066_episode_39_78_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_39", + "recording_id": "task_066_episode_39", + "episode_id": null, + "camera": "cam_high", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_066_episode_39_157_cam_high.jpg", + "images/task_066_episode_39_183_cam_high.jpg", + "images/task_066_episode_39_210_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_39", + "recording_id": "task_066_episode_39", + "episode_id": null, + "camera": "cam_high", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_066_episode_39_421_cam_high.jpg", + "images/task_066_episode_39_448_cam_high.jpg", + "images/task_066_episode_39_474_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_39", + "recording_id": "task_066_episode_39", + "episode_id": null, + "camera": "cam_high", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_066_episode_39_131_cam_high.jpg", + "images/task_066_episode_39_157_cam_high.jpg", + "images/task_066_episode_39_104_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_39", + "recording_id": "task_066_episode_39", + "episode_id": null, + "camera": "cam_high", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_066_episode_39_236_cam_high.jpg", + "images/task_066_episode_39_263_cam_high.jpg", + "images/task_066_episode_39_289_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_39", + "recording_id": "task_066_episode_39", + "episode_id": null, + "camera": "cam_high", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_066_episode_39_78_cam_high.jpg", + "images/task_066_episode_39_51_cam_high.jpg", + "images/task_066_episode_39_104_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_39", + "recording_id": "task_066_episode_39", + "episode_id": null, + "camera": "cam_high", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_066_episode_39_316_cam_high.jpg", + "images/task_066_episode_39_289_cam_high.jpg", + "images/task_066_episode_39_342_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_39", + "recording_id": "task_066_episode_39", + "episode_id": null, + "camera": "cam_high", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Up\nC. Right\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Right" + } + ], + "images": [ + "images/task_066_episode_5_0_cam_high.jpg", + "images/task_066_episode_5_15_cam_high.jpg", + "images/task_066_episode_5_30_cam_high.jpg", + "images/task_066_episode_5_45_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_5", + "recording_id": "task_066_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Right" + } + ], + "images": [ + "images/task_066_episode_5_6_cam_high.jpg", + "images/task_066_episode_5_21_cam_high.jpg", + "images/task_066_episode_5_36_cam_high.jpg", + "images/task_066_episode_5_51_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_5", + "recording_id": "task_066_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Up\nC. Left\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Right" + } + ], + "images": [ + "images/task_066_episode_5_12_cam_high.jpg", + "images/task_066_episode_5_27_cam_high.jpg", + "images/task_066_episode_5_42_cam_high.jpg", + "images/task_066_episode_5_57_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_5", + "recording_id": "task_066_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Down\nC. Up\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Right" + } + ], + "images": [ + "images/task_066_episode_5_132_cam_high.jpg", + "images/task_066_episode_5_147_cam_high.jpg", + "images/task_066_episode_5_162_cam_high.jpg", + "images/task_066_episode_5_177_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_5", + "recording_id": "task_066_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Right" + } + ], + "images": [ + "images/task_066_episode_5_138_cam_high.jpg", + "images/task_066_episode_5_153_cam_high.jpg", + "images/task_066_episode_5_168_cam_high.jpg", + "images/task_066_episode_5_183_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_5", + "recording_id": "task_066_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Right\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Right" + } + ], + "images": [ + "images/task_066_episode_5_144_cam_high.jpg", + "images/task_066_episode_5_159_cam_high.jpg", + "images/task_066_episode_5_174_cam_high.jpg", + "images/task_066_episode_5_189_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_5", + "recording_id": "task_066_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_066_episode_5_254_cam_high.jpg", + "images/task_066_episode_5_269_cam_high.jpg", + "images/task_066_episode_5_284_cam_high.jpg", + "images/task_066_episode_5_299_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_5", + "recording_id": "task_066_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Only the left arm is actively moving" + } + ], + "images": [ + "images/task_066_episode_5_334_cam_high.jpg", + "images/task_066_episode_5_349_cam_high.jpg", + "images/task_066_episode_5_364_cam_high.jpg", + "images/task_066_episode_5_379_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_5", + "recording_id": "task_066_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_066_episode_5_192_cam_high.jpg", + "images/task_066_episode_5_207_cam_high.jpg", + "images/task_066_episode_5_222_cam_high.jpg", + "images/task_066_episode_5_237_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_5", + "recording_id": "task_066_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Only the right arm is actively moving" + } + ], + "images": [ + "images/task_066_episode_5_25_cam_high.jpg", + "images/task_066_episode_5_40_cam_high.jpg", + "images/task_066_episode_5_55_cam_high.jpg", + "images/task_066_episode_5_70_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_5", + "recording_id": "task_066_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_066_episode_5_284_cam_high.jpg", + "images/task_066_episode_5_299_cam_high.jpg", + "images/task_066_episode_5_314_cam_high.jpg", + "images/task_066_episode_5_329_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_5", + "recording_id": "task_066_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Only the left arm is actively moving" + } + ], + "images": [ + "images/task_066_episode_5_340_cam_high.jpg", + "images/task_066_episode_5_355_cam_high.jpg", + "images/task_066_episode_5_370_cam_high.jpg", + "images/task_066_episode_5_385_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_5", + "recording_id": "task_066_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Only the right arm is actively moving" + } + ], + "images": [ + "images/task_066_episode_5_167_cam_high.jpg", + "images/task_066_episode_5_182_cam_high.jpg", + "images/task_066_episode_5_197_cam_high.jpg", + "images/task_066_episode_5_212_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_5", + "recording_id": "task_066_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Only the right arm is actively moving" + } + ], + "images": [ + "images/task_066_episode_5_173_cam_high.jpg", + "images/task_066_episode_5_188_cam_high.jpg", + "images/task_066_episode_5_203_cam_high.jpg", + "images/task_066_episode_5_218_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_5", + "recording_id": "task_066_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Both arms are actively moving" + } + ], + "images": [ + "images/task_066_episode_5_12_cam_high.jpg", + "images/task_066_episode_5_27_cam_high.jpg", + "images/task_066_episode_5_42_cam_high.jpg", + "images/task_066_episode_5_57_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_5", + "recording_id": "task_066_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Both arms are actively moving" + } + ], + "images": [ + "images/task_066_episode_5_6_cam_high.jpg", + "images/task_066_episode_5_21_cam_high.jpg", + "images/task_066_episode_5_36_cam_high.jpg", + "images/task_066_episode_5_51_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_5", + "recording_id": "task_066_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Only the left arm is actively moving" + } + ], + "images": [ + "images/task_066_episode_5_359_cam_high.jpg", + "images/task_066_episode_5_374_cam_high.jpg", + "images/task_066_episode_5_389_cam_high.jpg", + "images/task_066_episode_5_404_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_5", + "recording_id": "task_066_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_066_episode_5_138_cam_high.jpg", + "images/task_066_episode_5_141_cam_high.jpg", + "images/task_066_episode_5_144_cam_high.jpg", + "images/task_066_episode_5_147_cam_high.jpg", + "images/task_066_episode_5_150_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_5", + "recording_id": "task_066_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_066_episode_5_304_cam_high.jpg", + "images/task_066_episode_5_307_cam_high.jpg", + "images/task_066_episode_5_310_cam_high.jpg", + "images/task_066_episode_5_313_cam_high.jpg", + "images/task_066_episode_5_316_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_5", + "recording_id": "task_066_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_066_episode_5_344_cam_high.jpg", + "images/task_066_episode_5_347_cam_high.jpg", + "images/task_066_episode_5_350_cam_high.jpg", + "images/task_066_episode_5_353_cam_high.jpg", + "images/task_066_episode_5_356_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_5", + "recording_id": "task_066_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_066_episode_5_189_cam_high.jpg", + "images/task_066_episode_5_192_cam_high.jpg", + "images/task_066_episode_5_195_cam_high.jpg", + "images/task_066_episode_5_198_cam_high.jpg", + "images/task_066_episode_5_201_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_5", + "recording_id": "task_066_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_066_episode_5_35_cam_high.jpg", + "images/task_066_episode_5_38_cam_high.jpg", + "images/task_066_episode_5_41_cam_high.jpg", + "images/task_066_episode_5_44_cam_high.jpg", + "images/task_066_episode_5_47_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_5", + "recording_id": "task_066_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_066_episode_5_424_cam_high.jpg", + "images/task_066_episode_5_427_cam_high.jpg", + "images/task_066_episode_5_430_cam_high.jpg", + "images/task_066_episode_5_433_cam_high.jpg", + "images/task_066_episode_5_436_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_5", + "recording_id": "task_066_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_066_episode_5_109_cam_high.jpg", + "images/task_066_episode_5_112_cam_high.jpg", + "images/task_066_episode_5_115_cam_high.jpg", + "images/task_066_episode_5_118_cam_high.jpg", + "images/task_066_episode_5_121_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_5", + "recording_id": "task_066_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_066_episode_5_166_cam_high.jpg", + "images/task_066_episode_5_169_cam_high.jpg", + "images/task_066_episode_5_172_cam_high.jpg", + "images/task_066_episode_5_175_cam_high.jpg", + "images/task_066_episode_5_178_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_5", + "recording_id": "task_066_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Frame X happens before Frame Y" + } + ], + "images": [ + "images/task_066_episode_5_90_cam_high.jpg", + "images/task_066_episode_5_359_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_5", + "recording_id": "task_066_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Frame X happens before Frame Y" + } + ], + "images": [ + "images/task_066_episode_5_125_cam_high.jpg", + "images/task_066_episode_5_309_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_5", + "recording_id": "task_066_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Frame Y happens before Frame X" + } + ], + "images": [ + "images/task_066_episode_5_409_cam_high.jpg", + "images/task_066_episode_5_175_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_5", + "recording_id": "task_066_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_066_episode_5_236_cam_high.jpg", + "images/task_066_episode_5_263_cam_high.jpg", + "images/task_066_episode_5_289_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_5", + "recording_id": "task_066_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_066_episode_5_157_cam_high.jpg", + "images/task_066_episode_5_183_cam_high.jpg", + "images/task_066_episode_5_131_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_5", + "recording_id": "task_066_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_066_episode_5_448_cam_high.jpg", + "images/task_066_episode_5_421_cam_high.jpg", + "images/task_066_episode_5_395_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_5", + "recording_id": "task_066_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_066_episode_5_421_cam_high.jpg", + "images/task_066_episode_5_395_cam_high.jpg", + "images/task_066_episode_5_368_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_5", + "recording_id": "task_066_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_066_episode_5_78_cam_high.jpg", + "images/task_066_episode_5_25_cam_high.jpg", + "images/task_066_episode_5_51_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_5", + "recording_id": "task_066_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_066_episode_5_342_cam_high.jpg", + "images/task_066_episode_5_368_cam_high.jpg", + "images/task_066_episode_5_316_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_5", + "recording_id": "task_066_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_066_episode_5_289_cam_high.jpg", + "images/task_066_episode_5_316_cam_high.jpg", + "images/task_066_episode_5_342_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_5", + "recording_id": "task_066_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_066_episode_5_210_cam_high.jpg", + "images/task_066_episode_5_263_cam_high.jpg", + "images/task_066_episode_5_236_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_5", + "recording_id": "task_066_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_066_episode_5_104_cam_high.jpg", + "images/task_066_episode_5_51_cam_high.jpg", + "images/task_066_episode_5_78_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_5", + "recording_id": "task_066_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_066_episode_5_104_cam_high.jpg", + "images/task_066_episode_5_157_cam_high.jpg", + "images/task_066_episode_5_131_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_5", + "recording_id": "task_066_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_066_episode_5_368_cam_high.jpg", + "images/task_066_episode_5_342_cam_high.jpg", + "images/task_066_episode_5_395_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_5", + "recording_id": "task_066_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_066_episode_5_104_cam_high.jpg", + "images/task_066_episode_5_131_cam_high.jpg", + "images/task_066_episode_5_78_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_066", + "split": "eval", + "split_group_id": "task_066_episode_5", + "recording_id": "task_066_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Left\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Right" + } + ], + "images": [ + "images/task_067_episode_13_0_cam_high.jpg", + "images/task_067_episode_13_15_cam_high.jpg", + "images/task_067_episode_13_30_cam_high.jpg", + "images/task_067_episode_13_45_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_13", + "recording_id": "task_067_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Right\nC. Left\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Right" + } + ], + "images": [ + "images/task_067_episode_13_6_cam_high.jpg", + "images/task_067_episode_13_21_cam_high.jpg", + "images/task_067_episode_13_36_cam_high.jpg", + "images/task_067_episode_13_51_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_13", + "recording_id": "task_067_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Up\nC. Down\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Right" + } + ], + "images": [ + "images/task_067_episode_13_12_cam_high.jpg", + "images/task_067_episode_13_27_cam_high.jpg", + "images/task_067_episode_13_42_cam_high.jpg", + "images/task_067_episode_13_57_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_13", + "recording_id": "task_067_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Right\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Right" + } + ], + "images": [ + "images/task_067_episode_13_108_cam_high.jpg", + "images/task_067_episode_13_123_cam_high.jpg", + "images/task_067_episode_13_138_cam_high.jpg", + "images/task_067_episode_13_153_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_13", + "recording_id": "task_067_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Left\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Right" + } + ], + "images": [ + "images/task_067_episode_13_114_cam_high.jpg", + "images/task_067_episode_13_129_cam_high.jpg", + "images/task_067_episode_13_144_cam_high.jpg", + "images/task_067_episode_13_159_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_13", + "recording_id": "task_067_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Left\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Right" + } + ], + "images": [ + "images/task_067_episode_13_120_cam_high.jpg", + "images/task_067_episode_13_135_cam_high.jpg", + "images/task_067_episode_13_150_cam_high.jpg", + "images/task_067_episode_13_165_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_13", + "recording_id": "task_067_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_067_episode_13_402_cam_high.jpg", + "images/task_067_episode_13_417_cam_high.jpg", + "images/task_067_episode_13_432_cam_high.jpg", + "images/task_067_episode_13_447_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_13", + "recording_id": "task_067_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_067_episode_13_272_cam_high.jpg", + "images/task_067_episode_13_287_cam_high.jpg", + "images/task_067_episode_13_302_cam_high.jpg", + "images/task_067_episode_13_317_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_13", + "recording_id": "task_067_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_067_episode_13_408_cam_high.jpg", + "images/task_067_episode_13_423_cam_high.jpg", + "images/task_067_episode_13_438_cam_high.jpg", + "images/task_067_episode_13_453_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_13", + "recording_id": "task_067_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Only the right arm is actively moving" + } + ], + "images": [ + "images/task_067_episode_13_31_cam_high.jpg", + "images/task_067_episode_13_46_cam_high.jpg", + "images/task_067_episode_13_61_cam_high.jpg", + "images/task_067_episode_13_76_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_13", + "recording_id": "task_067_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Only the left arm is actively moving" + } + ], + "images": [ + "images/task_067_episode_13_365_cam_high.jpg", + "images/task_067_episode_13_380_cam_high.jpg", + "images/task_067_episode_13_395_cam_high.jpg", + "images/task_067_episode_13_410_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_13", + "recording_id": "task_067_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Only the right arm is actively moving" + } + ], + "images": [ + "images/task_067_episode_13_19_cam_high.jpg", + "images/task_067_episode_13_34_cam_high.jpg", + "images/task_067_episode_13_49_cam_high.jpg", + "images/task_067_episode_13_64_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_13", + "recording_id": "task_067_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Only the right arm is actively moving" + } + ], + "images": [ + "images/task_067_episode_13_12_cam_high.jpg", + "images/task_067_episode_13_27_cam_high.jpg", + "images/task_067_episode_13_42_cam_high.jpg", + "images/task_067_episode_13_57_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_13", + "recording_id": "task_067_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_067_episode_13_395_cam_high.jpg", + "images/task_067_episode_13_398_cam_high.jpg", + "images/task_067_episode_13_401_cam_high.jpg", + "images/task_067_episode_13_404_cam_high.jpg", + "images/task_067_episode_13_407_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_13", + "recording_id": "task_067_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_067_episode_13_441_cam_high.jpg", + "images/task_067_episode_13_444_cam_high.jpg", + "images/task_067_episode_13_447_cam_high.jpg", + "images/task_067_episode_13_450_cam_high.jpg", + "images/task_067_episode_13_453_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_13", + "recording_id": "task_067_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_067_episode_13_378_cam_high.jpg", + "images/task_067_episode_13_381_cam_high.jpg", + "images/task_067_episode_13_384_cam_high.jpg", + "images/task_067_episode_13_387_cam_high.jpg", + "images/task_067_episode_13_390_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_13", + "recording_id": "task_067_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_067_episode_13_447_cam_high.jpg", + "images/task_067_episode_13_450_cam_high.jpg", + "images/task_067_episode_13_453_cam_high.jpg", + "images/task_067_episode_13_456_cam_high.jpg", + "images/task_067_episode_13_459_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_13", + "recording_id": "task_067_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_067_episode_13_58_cam_high.jpg", + "images/task_067_episode_13_61_cam_high.jpg", + "images/task_067_episode_13_64_cam_high.jpg", + "images/task_067_episode_13_67_cam_high.jpg", + "images/task_067_episode_13_70_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_13", + "recording_id": "task_067_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_067_episode_13_63_cam_high.jpg", + "images/task_067_episode_13_66_cam_high.jpg", + "images/task_067_episode_13_69_cam_high.jpg", + "images/task_067_episode_13_72_cam_high.jpg", + "images/task_067_episode_13_75_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_13", + "recording_id": "task_067_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_067_episode_13_458_cam_high.jpg", + "images/task_067_episode_13_461_cam_high.jpg", + "images/task_067_episode_13_464_cam_high.jpg", + "images/task_067_episode_13_467_cam_high.jpg", + "images/task_067_episode_13_470_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_13", + "recording_id": "task_067_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_067_episode_13_435_cam_high.jpg", + "images/task_067_episode_13_438_cam_high.jpg", + "images/task_067_episode_13_441_cam_high.jpg", + "images/task_067_episode_13_444_cam_high.jpg", + "images/task_067_episode_13_447_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_13", + "recording_id": "task_067_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Frame X happens before Frame Y" + } + ], + "images": [ + "images/task_067_episode_13_90_cam_high.jpg", + "images/task_067_episode_13_359_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_13", + "recording_id": "task_067_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Frame X happens before Frame Y" + } + ], + "images": [ + "images/task_067_episode_13_125_cam_high.jpg", + "images/task_067_episode_13_309_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_13", + "recording_id": "task_067_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Frame X happens before Frame Y" + } + ], + "images": [ + "images/task_067_episode_13_175_cam_high.jpg", + "images/task_067_episode_13_409_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_13", + "recording_id": "task_067_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_067_episode_13_342_cam_high.jpg", + "images/task_067_episode_13_368_cam_high.jpg", + "images/task_067_episode_13_316_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_13", + "recording_id": "task_067_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_067_episode_13_448_cam_high.jpg", + "images/task_067_episode_13_421_cam_high.jpg", + "images/task_067_episode_13_395_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_13", + "recording_id": "task_067_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_067_episode_13_263_cam_high.jpg", + "images/task_067_episode_13_316_cam_high.jpg", + "images/task_067_episode_13_289_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_13", + "recording_id": "task_067_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_067_episode_13_421_cam_high.jpg", + "images/task_067_episode_13_474_cam_high.jpg", + "images/task_067_episode_13_448_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_13", + "recording_id": "task_067_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_067_episode_13_51_cam_high.jpg", + "images/task_067_episode_13_78_cam_high.jpg", + "images/task_067_episode_13_104_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_13", + "recording_id": "task_067_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_067_episode_13_236_cam_high.jpg", + "images/task_067_episode_13_289_cam_high.jpg", + "images/task_067_episode_13_263_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_13", + "recording_id": "task_067_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_067_episode_13_210_cam_high.jpg", + "images/task_067_episode_13_183_cam_high.jpg", + "images/task_067_episode_13_236_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_13", + "recording_id": "task_067_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_067_episode_13_210_cam_high.jpg", + "images/task_067_episode_13_236_cam_high.jpg", + "images/task_067_episode_13_263_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_13", + "recording_id": "task_067_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_067_episode_13_210_cam_high.jpg", + "images/task_067_episode_13_157_cam_high.jpg", + "images/task_067_episode_13_183_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_13", + "recording_id": "task_067_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_067_episode_13_368_cam_high.jpg", + "images/task_067_episode_13_342_cam_high.jpg", + "images/task_067_episode_13_395_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_13", + "recording_id": "task_067_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_067_episode_13_25_cam_high.jpg", + "images/task_067_episode_13_51_cam_high.jpg", + "images/task_067_episode_13_78_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_13", + "recording_id": "task_067_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_067_episode_13_395_cam_high.jpg", + "images/task_067_episode_13_421_cam_high.jpg", + "images/task_067_episode_13_368_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_13", + "recording_id": "task_067_episode_13", + "episode_id": null, + "camera": "cam_high", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Up\nC. Down\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Right" + } + ], + "images": [ + "images/task_067_episode_21_0_cam_high.jpg", + "images/task_067_episode_21_15_cam_high.jpg", + "images/task_067_episode_21_30_cam_high.jpg", + "images/task_067_episode_21_45_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_21", + "recording_id": "task_067_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Left\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Right" + } + ], + "images": [ + "images/task_067_episode_21_6_cam_high.jpg", + "images/task_067_episode_21_21_cam_high.jpg", + "images/task_067_episode_21_36_cam_high.jpg", + "images/task_067_episode_21_51_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_21", + "recording_id": "task_067_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Up\nC. Left\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Right" + } + ], + "images": [ + "images/task_067_episode_21_12_cam_high.jpg", + "images/task_067_episode_21_27_cam_high.jpg", + "images/task_067_episode_21_42_cam_high.jpg", + "images/task_067_episode_21_57_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_21", + "recording_id": "task_067_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Left\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Right" + } + ], + "images": [ + "images/task_067_episode_21_114_cam_high.jpg", + "images/task_067_episode_21_129_cam_high.jpg", + "images/task_067_episode_21_144_cam_high.jpg", + "images/task_067_episode_21_159_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_21", + "recording_id": "task_067_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Right" + } + ], + "images": [ + "images/task_067_episode_21_120_cam_high.jpg", + "images/task_067_episode_21_135_cam_high.jpg", + "images/task_067_episode_21_150_cam_high.jpg", + "images/task_067_episode_21_165_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_21", + "recording_id": "task_067_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Left\nC. Up\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Right" + } + ], + "images": [ + "images/task_067_episode_21_126_cam_high.jpg", + "images/task_067_episode_21_141_cam_high.jpg", + "images/task_067_episode_21_156_cam_high.jpg", + "images/task_067_episode_21_171_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_21", + "recording_id": "task_067_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_067_episode_21_414_cam_high.jpg", + "images/task_067_episode_21_429_cam_high.jpg", + "images/task_067_episode_21_444_cam_high.jpg", + "images/task_067_episode_21_459_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_21", + "recording_id": "task_067_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Only the left arm is actively moving" + } + ], + "images": [ + "images/task_067_episode_21_142_cam_high.jpg", + "images/task_067_episode_21_157_cam_high.jpg", + "images/task_067_episode_21_172_cam_high.jpg", + "images/task_067_episode_21_187_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_21", + "recording_id": "task_067_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_067_episode_21_433_cam_high.jpg", + "images/task_067_episode_21_448_cam_high.jpg", + "images/task_067_episode_21_463_cam_high.jpg", + "images/task_067_episode_21_478_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_21", + "recording_id": "task_067_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_067_episode_21_260_cam_high.jpg", + "images/task_067_episode_21_275_cam_high.jpg", + "images/task_067_episode_21_290_cam_high.jpg", + "images/task_067_episode_21_305_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_21", + "recording_id": "task_067_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Only the left arm is actively moving" + } + ], + "images": [ + "images/task_067_episode_21_365_cam_high.jpg", + "images/task_067_episode_21_380_cam_high.jpg", + "images/task_067_episode_21_395_cam_high.jpg", + "images/task_067_episode_21_410_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_21", + "recording_id": "task_067_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Only the left arm is actively moving" + } + ], + "images": [ + "images/task_067_episode_21_173_cam_high.jpg", + "images/task_067_episode_21_188_cam_high.jpg", + "images/task_067_episode_21_203_cam_high.jpg", + "images/task_067_episode_21_218_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_21", + "recording_id": "task_067_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Only the right arm is actively moving" + } + ], + "images": [ + "images/task_067_episode_21_25_cam_high.jpg", + "images/task_067_episode_21_40_cam_high.jpg", + "images/task_067_episode_21_55_cam_high.jpg", + "images/task_067_episode_21_70_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_21", + "recording_id": "task_067_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_067_episode_21_75_cam_high.jpg", + "images/task_067_episode_21_78_cam_high.jpg", + "images/task_067_episode_21_81_cam_high.jpg", + "images/task_067_episode_21_84_cam_high.jpg", + "images/task_067_episode_21_87_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_21", + "recording_id": "task_067_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_067_episode_21_126_cam_high.jpg", + "images/task_067_episode_21_129_cam_high.jpg", + "images/task_067_episode_21_132_cam_high.jpg", + "images/task_067_episode_21_135_cam_high.jpg", + "images/task_067_episode_21_138_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_21", + "recording_id": "task_067_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_067_episode_21_269_cam_high.jpg", + "images/task_067_episode_21_272_cam_high.jpg", + "images/task_067_episode_21_275_cam_high.jpg", + "images/task_067_episode_21_278_cam_high.jpg", + "images/task_067_episode_21_281_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_21", + "recording_id": "task_067_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_067_episode_21_401_cam_high.jpg", + "images/task_067_episode_21_404_cam_high.jpg", + "images/task_067_episode_21_407_cam_high.jpg", + "images/task_067_episode_21_410_cam_high.jpg", + "images/task_067_episode_21_413_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_21", + "recording_id": "task_067_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_067_episode_21_447_cam_high.jpg", + "images/task_067_episode_21_450_cam_high.jpg", + "images/task_067_episode_21_453_cam_high.jpg", + "images/task_067_episode_21_456_cam_high.jpg", + "images/task_067_episode_21_459_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_21", + "recording_id": "task_067_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_067_episode_21_264_cam_high.jpg", + "images/task_067_episode_21_267_cam_high.jpg", + "images/task_067_episode_21_270_cam_high.jpg", + "images/task_067_episode_21_273_cam_high.jpg", + "images/task_067_episode_21_276_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_21", + "recording_id": "task_067_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_067_episode_21_40_cam_high.jpg", + "images/task_067_episode_21_43_cam_high.jpg", + "images/task_067_episode_21_46_cam_high.jpg", + "images/task_067_episode_21_49_cam_high.jpg", + "images/task_067_episode_21_52_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_21", + "recording_id": "task_067_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_067_episode_21_69_cam_high.jpg", + "images/task_067_episode_21_72_cam_high.jpg", + "images/task_067_episode_21_75_cam_high.jpg", + "images/task_067_episode_21_78_cam_high.jpg", + "images/task_067_episode_21_81_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_21", + "recording_id": "task_067_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Frame X happens before Frame Y" + } + ], + "images": [ + "images/task_067_episode_21_90_cam_high.jpg", + "images/task_067_episode_21_359_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_21", + "recording_id": "task_067_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Frame Y happens before Frame X" + } + ], + "images": [ + "images/task_067_episode_21_309_cam_high.jpg", + "images/task_067_episode_21_125_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_21", + "recording_id": "task_067_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Frame X happens before Frame Y" + } + ], + "images": [ + "images/task_067_episode_21_175_cam_high.jpg", + "images/task_067_episode_21_409_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_21", + "recording_id": "task_067_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_067_episode_21_104_cam_high.jpg", + "images/task_067_episode_21_78_cam_high.jpg", + "images/task_067_episode_21_131_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_21", + "recording_id": "task_067_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_067_episode_21_131_cam_high.jpg", + "images/task_067_episode_21_157_cam_high.jpg", + "images/task_067_episode_21_104_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_21", + "recording_id": "task_067_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_067_episode_21_421_cam_high.jpg", + "images/task_067_episode_21_448_cam_high.jpg", + "images/task_067_episode_21_395_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_21", + "recording_id": "task_067_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_067_episode_21_368_cam_high.jpg", + "images/task_067_episode_21_421_cam_high.jpg", + "images/task_067_episode_21_395_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_21", + "recording_id": "task_067_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_067_episode_21_342_cam_high.jpg", + "images/task_067_episode_21_368_cam_high.jpg", + "images/task_067_episode_21_395_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_21", + "recording_id": "task_067_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_067_episode_21_289_cam_high.jpg", + "images/task_067_episode_21_316_cam_high.jpg", + "images/task_067_episode_21_342_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_21", + "recording_id": "task_067_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_067_episode_21_78_cam_high.jpg", + "images/task_067_episode_21_25_cam_high.jpg", + "images/task_067_episode_21_51_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_21", + "recording_id": "task_067_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_067_episode_21_51_cam_high.jpg", + "images/task_067_episode_21_104_cam_high.jpg", + "images/task_067_episode_21_78_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_21", + "recording_id": "task_067_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_067_episode_21_474_cam_high.jpg", + "images/task_067_episode_21_448_cam_high.jpg", + "images/task_067_episode_21_421_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_21", + "recording_id": "task_067_episode_21", + "episode_id": null, + "camera": "cam_high", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Down\nC. Left\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Right" + } + ], + "images": [ + "images/task_067_episode_26_0_cam_high.jpg", + "images/task_067_episode_26_15_cam_high.jpg", + "images/task_067_episode_26_30_cam_high.jpg", + "images/task_067_episode_26_45_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_26", + "recording_id": "task_067_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Right\nC. Down\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Right" + } + ], + "images": [ + "images/task_067_episode_26_6_cam_high.jpg", + "images/task_067_episode_26_21_cam_high.jpg", + "images/task_067_episode_26_36_cam_high.jpg", + "images/task_067_episode_26_51_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_26", + "recording_id": "task_067_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Right\nC. Down\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Right" + } + ], + "images": [ + "images/task_067_episode_26_12_cam_high.jpg", + "images/task_067_episode_26_27_cam_high.jpg", + "images/task_067_episode_26_42_cam_high.jpg", + "images/task_067_episode_26_57_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_26", + "recording_id": "task_067_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Down\nC. Up\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Right" + } + ], + "images": [ + "images/task_067_episode_26_84_cam_high.jpg", + "images/task_067_episode_26_99_cam_high.jpg", + "images/task_067_episode_26_114_cam_high.jpg", + "images/task_067_episode_26_129_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_26", + "recording_id": "task_067_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Left\nC. Up\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Right" + } + ], + "images": [ + "images/task_067_episode_26_90_cam_high.jpg", + "images/task_067_episode_26_105_cam_high.jpg", + "images/task_067_episode_26_120_cam_high.jpg", + "images/task_067_episode_26_135_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_26", + "recording_id": "task_067_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Up\nC. Down\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Right" + } + ], + "images": [ + "images/task_067_episode_26_96_cam_high.jpg", + "images/task_067_episode_26_111_cam_high.jpg", + "images/task_067_episode_26_126_cam_high.jpg", + "images/task_067_episode_26_141_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_26", + "recording_id": "task_067_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Right\nC. Left\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Up" + } + ], + "images": [ + "images/task_067_episode_26_138_cam_high.jpg", + "images/task_067_episode_26_153_cam_high.jpg", + "images/task_067_episode_26_168_cam_high.jpg", + "images/task_067_episode_26_183_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_26", + "recording_id": "task_067_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_067_episode_26_235_cam_high.jpg", + "images/task_067_episode_26_250_cam_high.jpg", + "images/task_067_episode_26_265_cam_high.jpg", + "images/task_067_episode_26_280_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_26", + "recording_id": "task_067_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_067_episode_26_359_cam_high.jpg", + "images/task_067_episode_26_374_cam_high.jpg", + "images/task_067_episode_26_389_cam_high.jpg", + "images/task_067_episode_26_404_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_26", + "recording_id": "task_067_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Both arms are actively moving" + } + ], + "images": [ + "images/task_067_episode_26_6_cam_high.jpg", + "images/task_067_episode_26_21_cam_high.jpg", + "images/task_067_episode_26_36_cam_high.jpg", + "images/task_067_episode_26_51_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_26", + "recording_id": "task_067_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_067_episode_26_247_cam_high.jpg", + "images/task_067_episode_26_262_cam_high.jpg", + "images/task_067_episode_26_277_cam_high.jpg", + "images/task_067_episode_26_292_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_26", + "recording_id": "task_067_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Only the right arm is actively moving" + } + ], + "images": [ + "images/task_067_episode_26_68_cam_high.jpg", + "images/task_067_episode_26_83_cam_high.jpg", + "images/task_067_episode_26_98_cam_high.jpg", + "images/task_067_episode_26_113_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_26", + "recording_id": "task_067_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Only the right arm is actively moving" + } + ], + "images": [ + "images/task_067_episode_26_74_cam_high.jpg", + "images/task_067_episode_26_89_cam_high.jpg", + "images/task_067_episode_26_104_cam_high.jpg", + "images/task_067_episode_26_119_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_26", + "recording_id": "task_067_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Only the right arm is actively moving" + } + ], + "images": [ + "images/task_067_episode_26_56_cam_high.jpg", + "images/task_067_episode_26_71_cam_high.jpg", + "images/task_067_episode_26_86_cam_high.jpg", + "images/task_067_episode_26_101_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_26", + "recording_id": "task_067_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Only the left arm is actively moving" + } + ], + "images": [ + "images/task_067_episode_26_161_cam_high.jpg", + "images/task_067_episode_26_176_cam_high.jpg", + "images/task_067_episode_26_191_cam_high.jpg", + "images/task_067_episode_26_206_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_26", + "recording_id": "task_067_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Both arms are actively moving" + } + ], + "images": [ + "images/task_067_episode_26_43_cam_high.jpg", + "images/task_067_episode_26_58_cam_high.jpg", + "images/task_067_episode_26_73_cam_high.jpg", + "images/task_067_episode_26_88_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_26", + "recording_id": "task_067_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Only the left arm is actively moving" + } + ], + "images": [ + "images/task_067_episode_26_334_cam_high.jpg", + "images/task_067_episode_26_349_cam_high.jpg", + "images/task_067_episode_26_364_cam_high.jpg", + "images/task_067_episode_26_379_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_26", + "recording_id": "task_067_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Only the left arm is actively moving" + } + ], + "images": [ + "images/task_067_episode_26_328_cam_high.jpg", + "images/task_067_episode_26_343_cam_high.jpg", + "images/task_067_episode_26_358_cam_high.jpg", + "images/task_067_episode_26_373_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_26", + "recording_id": "task_067_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Both arms are actively moving" + } + ], + "images": [ + "images/task_067_episode_26_117_cam_high.jpg", + "images/task_067_episode_26_132_cam_high.jpg", + "images/task_067_episode_26_147_cam_high.jpg", + "images/task_067_episode_26_162_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_26", + "recording_id": "task_067_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_067_episode_26_138_cam_high.jpg", + "images/task_067_episode_26_141_cam_high.jpg", + "images/task_067_episode_26_144_cam_high.jpg", + "images/task_067_episode_26_147_cam_high.jpg", + "images/task_067_episode_26_150_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_26", + "recording_id": "task_067_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_067_episode_26_361_cam_high.jpg", + "images/task_067_episode_26_364_cam_high.jpg", + "images/task_067_episode_26_367_cam_high.jpg", + "images/task_067_episode_26_370_cam_high.jpg", + "images/task_067_episode_26_373_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_26", + "recording_id": "task_067_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_067_episode_26_401_cam_high.jpg", + "images/task_067_episode_26_404_cam_high.jpg", + "images/task_067_episode_26_407_cam_high.jpg", + "images/task_067_episode_26_410_cam_high.jpg", + "images/task_067_episode_26_413_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_26", + "recording_id": "task_067_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_067_episode_26_92_cam_high.jpg", + "images/task_067_episode_26_95_cam_high.jpg", + "images/task_067_episode_26_98_cam_high.jpg", + "images/task_067_episode_26_101_cam_high.jpg", + "images/task_067_episode_26_104_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_26", + "recording_id": "task_067_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_067_episode_26_75_cam_high.jpg", + "images/task_067_episode_26_78_cam_high.jpg", + "images/task_067_episode_26_81_cam_high.jpg", + "images/task_067_episode_26_84_cam_high.jpg", + "images/task_067_episode_26_87_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_26", + "recording_id": "task_067_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_067_episode_26_143_cam_high.jpg", + "images/task_067_episode_26_146_cam_high.jpg", + "images/task_067_episode_26_149_cam_high.jpg", + "images/task_067_episode_26_152_cam_high.jpg", + "images/task_067_episode_26_155_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_26", + "recording_id": "task_067_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_067_episode_26_46_cam_high.jpg", + "images/task_067_episode_26_49_cam_high.jpg", + "images/task_067_episode_26_52_cam_high.jpg", + "images/task_067_episode_26_55_cam_high.jpg", + "images/task_067_episode_26_58_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_26", + "recording_id": "task_067_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_067_episode_26_92_cam_high.jpg", + "images/task_067_episode_26_95_cam_high.jpg", + "images/task_067_episode_26_98_cam_high.jpg", + "images/task_067_episode_26_101_cam_high.jpg", + "images/task_067_episode_26_104_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_26", + "recording_id": "task_067_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Frame Y happens before Frame X" + } + ], + "images": [ + "images/task_067_episode_26_359_cam_high.jpg", + "images/task_067_episode_26_90_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_26", + "recording_id": "task_067_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Frame Y happens before Frame X" + } + ], + "images": [ + "images/task_067_episode_26_309_cam_high.jpg", + "images/task_067_episode_26_125_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_26", + "recording_id": "task_067_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Frame X happens before Frame Y" + } + ], + "images": [ + "images/task_067_episode_26_175_cam_high.jpg", + "images/task_067_episode_26_409_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_26", + "recording_id": "task_067_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_067_episode_26_316_cam_high.jpg", + "images/task_067_episode_26_263_cam_high.jpg", + "images/task_067_episode_26_289_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_26", + "recording_id": "task_067_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_067_episode_26_236_cam_high.jpg", + "images/task_067_episode_26_183_cam_high.jpg", + "images/task_067_episode_26_210_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_26", + "recording_id": "task_067_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_067_episode_26_210_cam_high.jpg", + "images/task_067_episode_26_157_cam_high.jpg", + "images/task_067_episode_26_183_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_26", + "recording_id": "task_067_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_067_episode_26_51_cam_high.jpg", + "images/task_067_episode_26_78_cam_high.jpg", + "images/task_067_episode_26_104_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_26", + "recording_id": "task_067_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_067_episode_26_131_cam_high.jpg", + "images/task_067_episode_26_78_cam_high.jpg", + "images/task_067_episode_26_104_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_26", + "recording_id": "task_067_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_067_episode_26_474_cam_high.jpg", + "images/task_067_episode_26_448_cam_high.jpg", + "images/task_067_episode_26_421_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_26", + "recording_id": "task_067_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_067_episode_26_342_cam_high.jpg", + "images/task_067_episode_26_316_cam_high.jpg", + "images/task_067_episode_26_368_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_26", + "recording_id": "task_067_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_067_episode_26_395_cam_high.jpg", + "images/task_067_episode_26_368_cam_high.jpg", + "images/task_067_episode_26_421_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_26", + "recording_id": "task_067_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_067_episode_26_263_cam_high.jpg", + "images/task_067_episode_26_289_cam_high.jpg", + "images/task_067_episode_26_236_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_26", + "recording_id": "task_067_episode_26", + "episode_id": null, + "camera": "cam_high", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Right" + } + ], + "images": [ + "images/task_067_episode_28_0_cam_high.jpg", + "images/task_067_episode_28_15_cam_high.jpg", + "images/task_067_episode_28_30_cam_high.jpg", + "images/task_067_episode_28_45_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_28", + "recording_id": "task_067_episode_28", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Right" + } + ], + "images": [ + "images/task_067_episode_28_6_cam_high.jpg", + "images/task_067_episode_28_21_cam_high.jpg", + "images/task_067_episode_28_36_cam_high.jpg", + "images/task_067_episode_28_51_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_28", + "recording_id": "task_067_episode_28", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Right\nC. Down\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Right" + } + ], + "images": [ + "images/task_067_episode_28_12_cam_high.jpg", + "images/task_067_episode_28_27_cam_high.jpg", + "images/task_067_episode_28_42_cam_high.jpg", + "images/task_067_episode_28_57_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_28", + "recording_id": "task_067_episode_28", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Right" + } + ], + "images": [ + "images/task_067_episode_28_96_cam_high.jpg", + "images/task_067_episode_28_111_cam_high.jpg", + "images/task_067_episode_28_126_cam_high.jpg", + "images/task_067_episode_28_141_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_28", + "recording_id": "task_067_episode_28", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Up\nC. Down\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Right" + } + ], + "images": [ + "images/task_067_episode_28_102_cam_high.jpg", + "images/task_067_episode_28_117_cam_high.jpg", + "images/task_067_episode_28_132_cam_high.jpg", + "images/task_067_episode_28_147_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_28", + "recording_id": "task_067_episode_28", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Up\nC. Right\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Right" + } + ], + "images": [ + "images/task_067_episode_28_108_cam_high.jpg", + "images/task_067_episode_28_123_cam_high.jpg", + "images/task_067_episode_28_138_cam_high.jpg", + "images/task_067_episode_28_153_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_28", + "recording_id": "task_067_episode_28", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Up\nC. Right\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Up" + } + ], + "images": [ + "images/task_067_episode_28_144_cam_high.jpg", + "images/task_067_episode_28_159_cam_high.jpg", + "images/task_067_episode_28_174_cam_high.jpg", + "images/task_067_episode_28_189_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_28", + "recording_id": "task_067_episode_28", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Only the right arm is actively moving" + } + ], + "images": [ + "images/task_067_episode_28_6_cam_high.jpg", + "images/task_067_episode_28_21_cam_high.jpg", + "images/task_067_episode_28_36_cam_high.jpg", + "images/task_067_episode_28_51_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_28", + "recording_id": "task_067_episode_28", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_067_episode_28_179_cam_high.jpg", + "images/task_067_episode_28_194_cam_high.jpg", + "images/task_067_episode_28_209_cam_high.jpg", + "images/task_067_episode_28_224_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_28", + "recording_id": "task_067_episode_28", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_067_episode_28_254_cam_high.jpg", + "images/task_067_episode_28_269_cam_high.jpg", + "images/task_067_episode_28_284_cam_high.jpg", + "images/task_067_episode_28_299_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_28", + "recording_id": "task_067_episode_28", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_067_episode_28_340_cam_high.jpg", + "images/task_067_episode_28_355_cam_high.jpg", + "images/task_067_episode_28_370_cam_high.jpg", + "images/task_067_episode_28_385_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_28", + "recording_id": "task_067_episode_28", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Both arms are actively moving" + } + ], + "images": [ + "images/task_067_episode_28_31_cam_high.jpg", + "images/task_067_episode_28_46_cam_high.jpg", + "images/task_067_episode_28_61_cam_high.jpg", + "images/task_067_episode_28_76_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_28", + "recording_id": "task_067_episode_28", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Only the left arm is actively moving" + } + ], + "images": [ + "images/task_067_episode_28_105_cam_high.jpg", + "images/task_067_episode_28_120_cam_high.jpg", + "images/task_067_episode_28_135_cam_high.jpg", + "images/task_067_episode_28_150_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_28", + "recording_id": "task_067_episode_28", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Both arms are actively moving" + } + ], + "images": [ + "images/task_067_episode_28_25_cam_high.jpg", + "images/task_067_episode_28_40_cam_high.jpg", + "images/task_067_episode_28_55_cam_high.jpg", + "images/task_067_episode_28_70_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_28", + "recording_id": "task_067_episode_28", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Only the left arm is actively moving" + } + ], + "images": [ + "images/task_067_episode_28_155_cam_high.jpg", + "images/task_067_episode_28_170_cam_high.jpg", + "images/task_067_episode_28_185_cam_high.jpg", + "images/task_067_episode_28_200_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_28", + "recording_id": "task_067_episode_28", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Only the right arm is actively moving" + } + ], + "images": [ + "images/task_067_episode_28_0_cam_high.jpg", + "images/task_067_episode_28_15_cam_high.jpg", + "images/task_067_episode_28_30_cam_high.jpg", + "images/task_067_episode_28_45_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_28", + "recording_id": "task_067_episode_28", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Only the left arm is actively moving" + } + ], + "images": [ + "images/task_067_episode_28_111_cam_high.jpg", + "images/task_067_episode_28_126_cam_high.jpg", + "images/task_067_episode_28_141_cam_high.jpg", + "images/task_067_episode_28_156_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_28", + "recording_id": "task_067_episode_28", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Only the right arm is actively moving" + } + ], + "images": [ + "images/task_067_episode_28_62_cam_high.jpg", + "images/task_067_episode_28_77_cam_high.jpg", + "images/task_067_episode_28_92_cam_high.jpg", + "images/task_067_episode_28_107_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_28", + "recording_id": "task_067_episode_28", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_067_episode_28_166_cam_high.jpg", + "images/task_067_episode_28_169_cam_high.jpg", + "images/task_067_episode_28_172_cam_high.jpg", + "images/task_067_episode_28_175_cam_high.jpg", + "images/task_067_episode_28_178_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_28", + "recording_id": "task_067_episode_28", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_067_episode_28_395_cam_high.jpg", + "images/task_067_episode_28_398_cam_high.jpg", + "images/task_067_episode_28_401_cam_high.jpg", + "images/task_067_episode_28_404_cam_high.jpg", + "images/task_067_episode_28_407_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_28", + "recording_id": "task_067_episode_28", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_067_episode_28_424_cam_high.jpg", + "images/task_067_episode_28_427_cam_high.jpg", + "images/task_067_episode_28_430_cam_high.jpg", + "images/task_067_episode_28_433_cam_high.jpg", + "images/task_067_episode_28_436_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_28", + "recording_id": "task_067_episode_28", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_067_episode_28_332_cam_high.jpg", + "images/task_067_episode_28_335_cam_high.jpg", + "images/task_067_episode_28_338_cam_high.jpg", + "images/task_067_episode_28_341_cam_high.jpg", + "images/task_067_episode_28_344_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_28", + "recording_id": "task_067_episode_28", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_067_episode_28_46_cam_high.jpg", + "images/task_067_episode_28_49_cam_high.jpg", + "images/task_067_episode_28_52_cam_high.jpg", + "images/task_067_episode_28_55_cam_high.jpg", + "images/task_067_episode_28_58_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_28", + "recording_id": "task_067_episode_28", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_067_episode_28_298_cam_high.jpg", + "images/task_067_episode_28_301_cam_high.jpg", + "images/task_067_episode_28_304_cam_high.jpg", + "images/task_067_episode_28_307_cam_high.jpg", + "images/task_067_episode_28_310_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_28", + "recording_id": "task_067_episode_28", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_067_episode_28_286_cam_high.jpg", + "images/task_067_episode_28_289_cam_high.jpg", + "images/task_067_episode_28_292_cam_high.jpg", + "images/task_067_episode_28_295_cam_high.jpg", + "images/task_067_episode_28_298_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_28", + "recording_id": "task_067_episode_28", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_067_episode_28_98_cam_high.jpg", + "images/task_067_episode_28_101_cam_high.jpg", + "images/task_067_episode_28_104_cam_high.jpg", + "images/task_067_episode_28_107_cam_high.jpg", + "images/task_067_episode_28_110_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_28", + "recording_id": "task_067_episode_28", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Frame Y happens before Frame X" + } + ], + "images": [ + "images/task_067_episode_28_359_cam_high.jpg", + "images/task_067_episode_28_90_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_28", + "recording_id": "task_067_episode_28", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Frame X happens before Frame Y" + } + ], + "images": [ + "images/task_067_episode_28_125_cam_high.jpg", + "images/task_067_episode_28_309_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_28", + "recording_id": "task_067_episode_28", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Frame Y happens before Frame X" + } + ], + "images": [ + "images/task_067_episode_28_409_cam_high.jpg", + "images/task_067_episode_28_175_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_28", + "recording_id": "task_067_episode_28", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_067_episode_28_368_cam_high.jpg", + "images/task_067_episode_28_395_cam_high.jpg", + "images/task_067_episode_28_421_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_28", + "recording_id": "task_067_episode_28", + "episode_id": null, + "camera": "cam_high", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_067_episode_28_316_cam_high.jpg", + "images/task_067_episode_28_263_cam_high.jpg", + "images/task_067_episode_28_289_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_28", + "recording_id": "task_067_episode_28", + "episode_id": null, + "camera": "cam_high", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_067_episode_28_263_cam_high.jpg", + "images/task_067_episode_28_289_cam_high.jpg", + "images/task_067_episode_28_236_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_28", + "recording_id": "task_067_episode_28", + "episode_id": null, + "camera": "cam_high", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_067_episode_28_316_cam_high.jpg", + "images/task_067_episode_28_368_cam_high.jpg", + "images/task_067_episode_28_342_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_28", + "recording_id": "task_067_episode_28", + "episode_id": null, + "camera": "cam_high", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_067_episode_28_210_cam_high.jpg", + "images/task_067_episode_28_236_cam_high.jpg", + "images/task_067_episode_28_263_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_28", + "recording_id": "task_067_episode_28", + "episode_id": null, + "camera": "cam_high", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_067_episode_28_157_cam_high.jpg", + "images/task_067_episode_28_131_cam_high.jpg", + "images/task_067_episode_28_104_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_28", + "recording_id": "task_067_episode_28", + "episode_id": null, + "camera": "cam_high", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_067_episode_28_183_cam_high.jpg", + "images/task_067_episode_28_236_cam_high.jpg", + "images/task_067_episode_28_210_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_28", + "recording_id": "task_067_episode_28", + "episode_id": null, + "camera": "cam_high", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Up\nC. Down\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Right" + } + ], + "images": [ + "images/task_067_episode_36_0_cam_high.jpg", + "images/task_067_episode_36_15_cam_high.jpg", + "images/task_067_episode_36_30_cam_high.jpg", + "images/task_067_episode_36_45_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_36", + "recording_id": "task_067_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Right\nC. Left\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Right" + } + ], + "images": [ + "images/task_067_episode_36_6_cam_high.jpg", + "images/task_067_episode_36_21_cam_high.jpg", + "images/task_067_episode_36_36_cam_high.jpg", + "images/task_067_episode_36_51_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_36", + "recording_id": "task_067_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Up\nC. Down\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Right" + } + ], + "images": [ + "images/task_067_episode_36_12_cam_high.jpg", + "images/task_067_episode_36_27_cam_high.jpg", + "images/task_067_episode_36_42_cam_high.jpg", + "images/task_067_episode_36_57_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_36", + "recording_id": "task_067_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Right\nC. Left\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Right" + } + ], + "images": [ + "images/task_067_episode_36_156_cam_high.jpg", + "images/task_067_episode_36_171_cam_high.jpg", + "images/task_067_episode_36_186_cam_high.jpg", + "images/task_067_episode_36_201_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_36", + "recording_id": "task_067_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Up\nC. Down\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Right" + } + ], + "images": [ + "images/task_067_episode_36_162_cam_high.jpg", + "images/task_067_episode_36_177_cam_high.jpg", + "images/task_067_episode_36_192_cam_high.jpg", + "images/task_067_episode_36_207_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_36", + "recording_id": "task_067_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Down\nC. Right\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Right" + } + ], + "images": [ + "images/task_067_episode_36_168_cam_high.jpg", + "images/task_067_episode_36_183_cam_high.jpg", + "images/task_067_episode_36_198_cam_high.jpg", + "images/task_067_episode_36_213_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_36", + "recording_id": "task_067_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Up\nC. Right\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Down" + } + ], + "images": [ + "images/task_067_episode_36_312_cam_high.jpg", + "images/task_067_episode_36_327_cam_high.jpg", + "images/task_067_episode_36_342_cam_high.jpg", + "images/task_067_episode_36_357_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_36", + "recording_id": "task_067_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_067_episode_36_396_cam_high.jpg", + "images/task_067_episode_36_411_cam_high.jpg", + "images/task_067_episode_36_426_cam_high.jpg", + "images/task_067_episode_36_441_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_36", + "recording_id": "task_067_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_067_episode_36_278_cam_high.jpg", + "images/task_067_episode_36_293_cam_high.jpg", + "images/task_067_episode_36_308_cam_high.jpg", + "images/task_067_episode_36_323_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_36", + "recording_id": "task_067_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Only the left arm is actively moving" + } + ], + "images": [ + "images/task_067_episode_36_346_cam_high.jpg", + "images/task_067_episode_36_361_cam_high.jpg", + "images/task_067_episode_36_376_cam_high.jpg", + "images/task_067_episode_36_391_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_36", + "recording_id": "task_067_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Only the right arm is actively moving" + } + ], + "images": [ + "images/task_067_episode_36_37_cam_high.jpg", + "images/task_067_episode_36_52_cam_high.jpg", + "images/task_067_episode_36_67_cam_high.jpg", + "images/task_067_episode_36_82_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_36", + "recording_id": "task_067_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Only the right arm is actively moving" + } + ], + "images": [ + "images/task_067_episode_36_56_cam_high.jpg", + "images/task_067_episode_36_71_cam_high.jpg", + "images/task_067_episode_36_86_cam_high.jpg", + "images/task_067_episode_36_101_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_36", + "recording_id": "task_067_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Only the left arm is actively moving" + } + ], + "images": [ + "images/task_067_episode_36_192_cam_high.jpg", + "images/task_067_episode_36_207_cam_high.jpg", + "images/task_067_episode_36_222_cam_high.jpg", + "images/task_067_episode_36_237_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_36", + "recording_id": "task_067_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_067_episode_36_408_cam_high.jpg", + "images/task_067_episode_36_423_cam_high.jpg", + "images/task_067_episode_36_438_cam_high.jpg", + "images/task_067_episode_36_453_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_36", + "recording_id": "task_067_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Only the left arm is actively moving" + } + ], + "images": [ + "images/task_067_episode_36_334_cam_high.jpg", + "images/task_067_episode_36_349_cam_high.jpg", + "images/task_067_episode_36_364_cam_high.jpg", + "images/task_067_episode_36_379_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_36", + "recording_id": "task_067_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Only the right arm is actively moving" + } + ], + "images": [ + "images/task_067_episode_36_12_cam_high.jpg", + "images/task_067_episode_36_27_cam_high.jpg", + "images/task_067_episode_36_42_cam_high.jpg", + "images/task_067_episode_36_57_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_36", + "recording_id": "task_067_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_067_episode_36_338_cam_high.jpg", + "images/task_067_episode_36_341_cam_high.jpg", + "images/task_067_episode_36_344_cam_high.jpg", + "images/task_067_episode_36_347_cam_high.jpg", + "images/task_067_episode_36_350_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_36", + "recording_id": "task_067_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_067_episode_36_470_cam_high.jpg", + "images/task_067_episode_36_473_cam_high.jpg", + "images/task_067_episode_36_476_cam_high.jpg", + "images/task_067_episode_36_479_cam_high.jpg", + "images/task_067_episode_36_482_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_36", + "recording_id": "task_067_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_067_episode_36_389_cam_high.jpg", + "images/task_067_episode_36_392_cam_high.jpg", + "images/task_067_episode_36_395_cam_high.jpg", + "images/task_067_episode_36_398_cam_high.jpg", + "images/task_067_episode_36_401_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_36", + "recording_id": "task_067_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_067_episode_36_229_cam_high.jpg", + "images/task_067_episode_36_232_cam_high.jpg", + "images/task_067_episode_36_235_cam_high.jpg", + "images/task_067_episode_36_238_cam_high.jpg", + "images/task_067_episode_36_241_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_36", + "recording_id": "task_067_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_067_episode_36_338_cam_high.jpg", + "images/task_067_episode_36_341_cam_high.jpg", + "images/task_067_episode_36_344_cam_high.jpg", + "images/task_067_episode_36_347_cam_high.jpg", + "images/task_067_episode_36_350_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_36", + "recording_id": "task_067_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_067_episode_36_63_cam_high.jpg", + "images/task_067_episode_36_66_cam_high.jpg", + "images/task_067_episode_36_69_cam_high.jpg", + "images/task_067_episode_36_72_cam_high.jpg", + "images/task_067_episode_36_75_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_36", + "recording_id": "task_067_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_067_episode_36_258_cam_high.jpg", + "images/task_067_episode_36_261_cam_high.jpg", + "images/task_067_episode_36_264_cam_high.jpg", + "images/task_067_episode_36_267_cam_high.jpg", + "images/task_067_episode_36_270_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_36", + "recording_id": "task_067_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_067_episode_36_35_cam_high.jpg", + "images/task_067_episode_36_38_cam_high.jpg", + "images/task_067_episode_36_41_cam_high.jpg", + "images/task_067_episode_36_44_cam_high.jpg", + "images/task_067_episode_36_47_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_36", + "recording_id": "task_067_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Frame Y happens before Frame X" + } + ], + "images": [ + "images/task_067_episode_36_359_cam_high.jpg", + "images/task_067_episode_36_90_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_36", + "recording_id": "task_067_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Frame Y happens before Frame X" + } + ], + "images": [ + "images/task_067_episode_36_309_cam_high.jpg", + "images/task_067_episode_36_125_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_36", + "recording_id": "task_067_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Frame X happens before Frame Y" + } + ], + "images": [ + "images/task_067_episode_36_175_cam_high.jpg", + "images/task_067_episode_36_409_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_36", + "recording_id": "task_067_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_067_episode_36_183_cam_high.jpg", + "images/task_067_episode_36_157_cam_high.jpg", + "images/task_067_episode_36_131_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_36", + "recording_id": "task_067_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_067_episode_36_316_cam_high.jpg", + "images/task_067_episode_36_289_cam_high.jpg", + "images/task_067_episode_36_342_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_36", + "recording_id": "task_067_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_067_episode_36_342_cam_high.jpg", + "images/task_067_episode_36_395_cam_high.jpg", + "images/task_067_episode_36_368_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_36", + "recording_id": "task_067_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_067_episode_36_183_cam_high.jpg", + "images/task_067_episode_36_210_cam_high.jpg", + "images/task_067_episode_36_157_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_36", + "recording_id": "task_067_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_067_episode_36_78_cam_high.jpg", + "images/task_067_episode_36_131_cam_high.jpg", + "images/task_067_episode_36_104_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_36", + "recording_id": "task_067_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_067_episode_36_448_cam_high.jpg", + "images/task_067_episode_36_395_cam_high.jpg", + "images/task_067_episode_36_421_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_36", + "recording_id": "task_067_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_067_episode_36_263_cam_high.jpg", + "images/task_067_episode_36_289_cam_high.jpg", + "images/task_067_episode_36_316_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_36", + "recording_id": "task_067_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_067_episode_36_474_cam_high.jpg", + "images/task_067_episode_36_421_cam_high.jpg", + "images/task_067_episode_36_448_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_36", + "recording_id": "task_067_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_067_episode_36_210_cam_high.jpg", + "images/task_067_episode_36_236_cam_high.jpg", + "images/task_067_episode_36_183_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_36", + "recording_id": "task_067_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_067_episode_36_51_cam_high.jpg", + "images/task_067_episode_36_104_cam_high.jpg", + "images/task_067_episode_36_78_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_36", + "recording_id": "task_067_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_067_episode_36_263_cam_high.jpg", + "images/task_067_episode_36_236_cam_high.jpg", + "images/task_067_episode_36_210_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_36", + "recording_id": "task_067_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_067_episode_36_316_cam_high.jpg", + "images/task_067_episode_36_368_cam_high.jpg", + "images/task_067_episode_36_342_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_36", + "recording_id": "task_067_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_067_episode_36_421_cam_high.jpg", + "images/task_067_episode_36_395_cam_high.jpg", + "images/task_067_episode_36_368_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_36", + "recording_id": "task_067_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_067_episode_36_51_cam_high.jpg", + "images/task_067_episode_36_25_cam_high.jpg", + "images/task_067_episode_36_78_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_36", + "recording_id": "task_067_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Right" + } + ], + "images": [ + "images/task_067_episode_44_0_cam_high.jpg", + "images/task_067_episode_44_15_cam_high.jpg", + "images/task_067_episode_44_30_cam_high.jpg", + "images/task_067_episode_44_45_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_44", + "recording_id": "task_067_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Right\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Right" + } + ], + "images": [ + "images/task_067_episode_44_6_cam_high.jpg", + "images/task_067_episode_44_21_cam_high.jpg", + "images/task_067_episode_44_36_cam_high.jpg", + "images/task_067_episode_44_51_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_44", + "recording_id": "task_067_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Left\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Right" + } + ], + "images": [ + "images/task_067_episode_44_12_cam_high.jpg", + "images/task_067_episode_44_27_cam_high.jpg", + "images/task_067_episode_44_42_cam_high.jpg", + "images/task_067_episode_44_57_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_44", + "recording_id": "task_067_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Up\nC. Down\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Right" + } + ], + "images": [ + "images/task_067_episode_44_84_cam_high.jpg", + "images/task_067_episode_44_99_cam_high.jpg", + "images/task_067_episode_44_114_cam_high.jpg", + "images/task_067_episode_44_129_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_44", + "recording_id": "task_067_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Up\nC. Down\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Right" + } + ], + "images": [ + "images/task_067_episode_44_90_cam_high.jpg", + "images/task_067_episode_44_105_cam_high.jpg", + "images/task_067_episode_44_120_cam_high.jpg", + "images/task_067_episode_44_135_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_44", + "recording_id": "task_067_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Left\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Right" + } + ], + "images": [ + "images/task_067_episode_44_96_cam_high.jpg", + "images/task_067_episode_44_111_cam_high.jpg", + "images/task_067_episode_44_126_cam_high.jpg", + "images/task_067_episode_44_141_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_44", + "recording_id": "task_067_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Left\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Up" + } + ], + "images": [ + "images/task_067_episode_44_312_cam_high.jpg", + "images/task_067_episode_44_327_cam_high.jpg", + "images/task_067_episode_44_342_cam_high.jpg", + "images/task_067_episode_44_357_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_44", + "recording_id": "task_067_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_067_episode_44_80_cam_high.jpg", + "images/task_067_episode_44_95_cam_high.jpg", + "images/task_067_episode_44_110_cam_high.jpg", + "images/task_067_episode_44_125_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_44", + "recording_id": "task_067_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Both arms are actively moving" + } + ], + "images": [ + "images/task_067_episode_44_25_cam_high.jpg", + "images/task_067_episode_44_40_cam_high.jpg", + "images/task_067_episode_44_55_cam_high.jpg", + "images/task_067_episode_44_70_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_44", + "recording_id": "task_067_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_067_episode_44_74_cam_high.jpg", + "images/task_067_episode_44_89_cam_high.jpg", + "images/task_067_episode_44_104_cam_high.jpg", + "images/task_067_episode_44_119_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_44", + "recording_id": "task_067_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_067_episode_44_229_cam_high.jpg", + "images/task_067_episode_44_244_cam_high.jpg", + "images/task_067_episode_44_259_cam_high.jpg", + "images/task_067_episode_44_274_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_44", + "recording_id": "task_067_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Only the left arm is actively moving" + } + ], + "images": [ + "images/task_067_episode_44_99_cam_high.jpg", + "images/task_067_episode_44_114_cam_high.jpg", + "images/task_067_episode_44_129_cam_high.jpg", + "images/task_067_episode_44_144_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_44", + "recording_id": "task_067_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Only the left arm is actively moving" + } + ], + "images": [ + "images/task_067_episode_44_278_cam_high.jpg", + "images/task_067_episode_44_293_cam_high.jpg", + "images/task_067_episode_44_308_cam_high.jpg", + "images/task_067_episode_44_323_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_44", + "recording_id": "task_067_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Only the left arm is actively moving" + } + ], + "images": [ + "images/task_067_episode_44_155_cam_high.jpg", + "images/task_067_episode_44_170_cam_high.jpg", + "images/task_067_episode_44_185_cam_high.jpg", + "images/task_067_episode_44_200_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_44", + "recording_id": "task_067_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Only the right arm is actively moving" + } + ], + "images": [ + "images/task_067_episode_44_0_cam_high.jpg", + "images/task_067_episode_44_15_cam_high.jpg", + "images/task_067_episode_44_30_cam_high.jpg", + "images/task_067_episode_44_45_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_44", + "recording_id": "task_067_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_067_episode_44_361_cam_high.jpg", + "images/task_067_episode_44_364_cam_high.jpg", + "images/task_067_episode_44_367_cam_high.jpg", + "images/task_067_episode_44_370_cam_high.jpg", + "images/task_067_episode_44_373_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_44", + "recording_id": "task_067_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_067_episode_44_447_cam_high.jpg", + "images/task_067_episode_44_450_cam_high.jpg", + "images/task_067_episode_44_453_cam_high.jpg", + "images/task_067_episode_44_456_cam_high.jpg", + "images/task_067_episode_44_459_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_44", + "recording_id": "task_067_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_067_episode_44_286_cam_high.jpg", + "images/task_067_episode_44_289_cam_high.jpg", + "images/task_067_episode_44_292_cam_high.jpg", + "images/task_067_episode_44_295_cam_high.jpg", + "images/task_067_episode_44_298_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_44", + "recording_id": "task_067_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_067_episode_44_29_cam_high.jpg", + "images/task_067_episode_44_32_cam_high.jpg", + "images/task_067_episode_44_35_cam_high.jpg", + "images/task_067_episode_44_38_cam_high.jpg", + "images/task_067_episode_44_41_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_44", + "recording_id": "task_067_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_067_episode_44_6_cam_high.jpg", + "images/task_067_episode_44_9_cam_high.jpg", + "images/task_067_episode_44_12_cam_high.jpg", + "images/task_067_episode_44_15_cam_high.jpg", + "images/task_067_episode_44_18_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_44", + "recording_id": "task_067_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_067_episode_44_138_cam_high.jpg", + "images/task_067_episode_44_141_cam_high.jpg", + "images/task_067_episode_44_144_cam_high.jpg", + "images/task_067_episode_44_147_cam_high.jpg", + "images/task_067_episode_44_150_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_44", + "recording_id": "task_067_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_067_episode_44_75_cam_high.jpg", + "images/task_067_episode_44_78_cam_high.jpg", + "images/task_067_episode_44_81_cam_high.jpg", + "images/task_067_episode_44_84_cam_high.jpg", + "images/task_067_episode_44_87_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_44", + "recording_id": "task_067_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_067_episode_44_52_cam_high.jpg", + "images/task_067_episode_44_55_cam_high.jpg", + "images/task_067_episode_44_58_cam_high.jpg", + "images/task_067_episode_44_61_cam_high.jpg", + "images/task_067_episode_44_64_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_44", + "recording_id": "task_067_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Frame Y happens before Frame X" + } + ], + "images": [ + "images/task_067_episode_44_359_cam_high.jpg", + "images/task_067_episode_44_90_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_44", + "recording_id": "task_067_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Frame Y happens before Frame X" + } + ], + "images": [ + "images/task_067_episode_44_309_cam_high.jpg", + "images/task_067_episode_44_125_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_44", + "recording_id": "task_067_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Frame Y happens before Frame X" + } + ], + "images": [ + "images/task_067_episode_44_409_cam_high.jpg", + "images/task_067_episode_44_175_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_44", + "recording_id": "task_067_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_067_episode_44_263_cam_high.jpg", + "images/task_067_episode_44_236_cam_high.jpg", + "images/task_067_episode_44_289_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_44", + "recording_id": "task_067_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_067_episode_44_316_cam_high.jpg", + "images/task_067_episode_44_263_cam_high.jpg", + "images/task_067_episode_44_289_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_44", + "recording_id": "task_067_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_067_episode_44_395_cam_high.jpg", + "images/task_067_episode_44_448_cam_high.jpg", + "images/task_067_episode_44_421_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_44", + "recording_id": "task_067_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_067_episode_44_421_cam_high.jpg", + "images/task_067_episode_44_448_cam_high.jpg", + "images/task_067_episode_44_474_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_44", + "recording_id": "task_067_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_067_episode_44_104_cam_high.jpg", + "images/task_067_episode_44_131_cam_high.jpg", + "images/task_067_episode_44_78_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_44", + "recording_id": "task_067_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_067_episode_44_183_cam_high.jpg", + "images/task_067_episode_44_157_cam_high.jpg", + "images/task_067_episode_44_210_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_44", + "recording_id": "task_067_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_067_episode_44_210_cam_high.jpg", + "images/task_067_episode_44_263_cam_high.jpg", + "images/task_067_episode_44_236_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_44", + "recording_id": "task_067_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_067_episode_44_316_cam_high.jpg", + "images/task_067_episode_44_342_cam_high.jpg", + "images/task_067_episode_44_368_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_44", + "recording_id": "task_067_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_067_episode_44_236_cam_high.jpg", + "images/task_067_episode_44_183_cam_high.jpg", + "images/task_067_episode_44_210_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_44", + "recording_id": "task_067_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_067_episode_44_25_cam_high.jpg", + "images/task_067_episode_44_78_cam_high.jpg", + "images/task_067_episode_44_51_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_067", + "split": "eval", + "split_group_id": "task_067_episode_44", + "recording_id": "task_067_episode_44", + "episode_id": null, + "camera": "cam_high", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Right\nC. Left\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Left" + } + ], + "images": [ + "images/task_070_episode_16_0_cam_high.jpg", + "images/task_070_episode_16_15_cam_high.jpg", + "images/task_070_episode_16_30_cam_high.jpg", + "images/task_070_episode_16_45_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_16", + "recording_id": "task_070_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Left\nC. Up\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Left" + } + ], + "images": [ + "images/task_070_episode_16_6_cam_high.jpg", + "images/task_070_episode_16_21_cam_high.jpg", + "images/task_070_episode_16_36_cam_high.jpg", + "images/task_070_episode_16_51_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_16", + "recording_id": "task_070_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Left\nC. Down\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Left" + } + ], + "images": [ + "images/task_070_episode_16_12_cam_high.jpg", + "images/task_070_episode_16_27_cam_high.jpg", + "images/task_070_episode_16_42_cam_high.jpg", + "images/task_070_episode_16_57_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_16", + "recording_id": "task_070_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Down\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Right" + } + ], + "images": [ + "images/task_070_episode_16_96_cam_high.jpg", + "images/task_070_episode_16_111_cam_high.jpg", + "images/task_070_episode_16_126_cam_high.jpg", + "images/task_070_episode_16_141_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_16", + "recording_id": "task_070_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Left\nC. Down\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Right" + } + ], + "images": [ + "images/task_070_episode_16_102_cam_high.jpg", + "images/task_070_episode_16_117_cam_high.jpg", + "images/task_070_episode_16_132_cam_high.jpg", + "images/task_070_episode_16_147_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_16", + "recording_id": "task_070_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Up\nC. Down\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Right" + } + ], + "images": [ + "images/task_070_episode_16_108_cam_high.jpg", + "images/task_070_episode_16_123_cam_high.jpg", + "images/task_070_episode_16_138_cam_high.jpg", + "images/task_070_episode_16_153_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_16", + "recording_id": "task_070_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Left\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Up" + } + ], + "images": [ + "images/task_070_episode_16_156_cam_high.jpg", + "images/task_070_episode_16_171_cam_high.jpg", + "images/task_070_episode_16_186_cam_high.jpg", + "images/task_070_episode_16_201_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_16", + "recording_id": "task_070_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Left\nC. Down\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Up" + } + ], + "images": [ + "images/task_070_episode_16_162_cam_high.jpg", + "images/task_070_episode_16_177_cam_high.jpg", + "images/task_070_episode_16_192_cam_high.jpg", + "images/task_070_episode_16_207_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_16", + "recording_id": "task_070_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Up" + } + ], + "images": [ + "images/task_070_episode_16_168_cam_high.jpg", + "images/task_070_episode_16_183_cam_high.jpg", + "images/task_070_episode_16_198_cam_high.jpg", + "images/task_070_episode_16_213_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_16", + "recording_id": "task_070_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Right\nC. Left\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Up" + } + ], + "images": [ + "images/task_070_episode_16_234_cam_high.jpg", + "images/task_070_episode_16_249_cam_high.jpg", + "images/task_070_episode_16_264_cam_high.jpg", + "images/task_070_episode_16_279_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_16", + "recording_id": "task_070_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Up" + } + ], + "images": [ + "images/task_070_episode_16_240_cam_high.jpg", + "images/task_070_episode_16_255_cam_high.jpg", + "images/task_070_episode_16_270_cam_high.jpg", + "images/task_070_episode_16_285_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_16", + "recording_id": "task_070_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Right\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Up" + } + ], + "images": [ + "images/task_070_episode_16_246_cam_high.jpg", + "images/task_070_episode_16_261_cam_high.jpg", + "images/task_070_episode_16_276_cam_high.jpg", + "images/task_070_episode_16_291_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_16", + "recording_id": "task_070_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Left\nC. Down\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Down" + } + ], + "images": [ + "images/task_070_episode_16_372_cam_high.jpg", + "images/task_070_episode_16_387_cam_high.jpg", + "images/task_070_episode_16_402_cam_high.jpg", + "images/task_070_episode_16_417_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_16", + "recording_id": "task_070_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Right\nC. Down\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Down" + } + ], + "images": [ + "images/task_070_episode_16_378_cam_high.jpg", + "images/task_070_episode_16_393_cam_high.jpg", + "images/task_070_episode_16_408_cam_high.jpg", + "images/task_070_episode_16_423_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_16", + "recording_id": "task_070_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Down\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Down" + } + ], + "images": [ + "images/task_070_episode_16_384_cam_high.jpg", + "images/task_070_episode_16_399_cam_high.jpg", + "images/task_070_episode_16_414_cam_high.jpg", + "images/task_070_episode_16_429_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_16", + "recording_id": "task_070_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Left\nC. Up\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Right" + } + ], + "images": [ + "images/task_070_episode_16_468_cam_high.jpg", + "images/task_070_episode_16_483_cam_high.jpg", + "images/task_070_episode_16_498_cam_high.jpg", + "images/task_070_episode_16_513_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_16", + "recording_id": "task_070_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Up\nC. Down\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Right" + } + ], + "images": [ + "images/task_070_episode_16_474_cam_high.jpg", + "images/task_070_episode_16_489_cam_high.jpg", + "images/task_070_episode_16_504_cam_high.jpg", + "images/task_070_episode_16_519_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_16", + "recording_id": "task_070_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Right\nC. Left\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Right" + } + ], + "images": [ + "images/task_070_episode_16_480_cam_high.jpg", + "images/task_070_episode_16_495_cam_high.jpg", + "images/task_070_episode_16_510_cam_high.jpg", + "images/task_070_episode_16_525_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_16", + "recording_id": "task_070_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Left\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Down" + } + ], + "images": [ + "images/task_070_episode_16_1128_cam_high.jpg", + "images/task_070_episode_16_1143_cam_high.jpg", + "images/task_070_episode_16_1158_cam_high.jpg", + "images/task_070_episode_16_1173_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_16", + "recording_id": "task_070_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Down\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Down" + } + ], + "images": [ + "images/task_070_episode_16_1134_cam_high.jpg", + "images/task_070_episode_16_1149_cam_high.jpg", + "images/task_070_episode_16_1164_cam_high.jpg", + "images/task_070_episode_16_1179_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_16", + "recording_id": "task_070_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Right\nC. Left\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Down" + } + ], + "images": [ + "images/task_070_episode_16_1140_cam_high.jpg", + "images/task_070_episode_16_1155_cam_high.jpg", + "images/task_070_episode_16_1170_cam_high.jpg", + "images/task_070_episode_16_1185_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_16", + "recording_id": "task_070_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_070_episode_16_1196_cam_high.jpg", + "images/task_070_episode_16_1211_cam_high.jpg", + "images/task_070_episode_16_1226_cam_high.jpg", + "images/task_070_episode_16_1241_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_16", + "recording_id": "task_070_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Only the left arm is actively moving" + } + ], + "images": [ + "images/task_070_episode_16_385_cam_high.jpg", + "images/task_070_episode_16_400_cam_high.jpg", + "images/task_070_episode_16_415_cam_high.jpg", + "images/task_070_episode_16_430_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_16", + "recording_id": "task_070_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_070_episode_16_567_cam_high.jpg", + "images/task_070_episode_16_582_cam_high.jpg", + "images/task_070_episode_16_597_cam_high.jpg", + "images/task_070_episode_16_612_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_16", + "recording_id": "task_070_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_070_episode_16_101_cam_high.jpg", + "images/task_070_episode_16_116_cam_high.jpg", + "images/task_070_episode_16_131_cam_high.jpg", + "images/task_070_episode_16_146_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_16", + "recording_id": "task_070_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Only the left arm is actively moving" + } + ], + "images": [ + "images/task_070_episode_16_507_cam_high.jpg", + "images/task_070_episode_16_522_cam_high.jpg", + "images/task_070_episode_16_537_cam_high.jpg", + "images/task_070_episode_16_552_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_16", + "recording_id": "task_070_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Only the left arm is actively moving" + } + ], + "images": [ + "images/task_070_episode_16_547_cam_high.jpg", + "images/task_070_episode_16_562_cam_high.jpg", + "images/task_070_episode_16_577_cam_high.jpg", + "images/task_070_episode_16_592_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_16", + "recording_id": "task_070_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Both arms are actively moving" + } + ], + "images": [ + "images/task_070_episode_16_1155_cam_high.jpg", + "images/task_070_episode_16_1170_cam_high.jpg", + "images/task_070_episode_16_1185_cam_high.jpg", + "images/task_070_episode_16_1200_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_16", + "recording_id": "task_070_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Only the right arm is actively moving" + } + ], + "images": [ + "images/task_070_episode_16_790_cam_high.jpg", + "images/task_070_episode_16_805_cam_high.jpg", + "images/task_070_episode_16_820_cam_high.jpg", + "images/task_070_episode_16_835_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_16", + "recording_id": "task_070_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Only the right arm is actively moving" + } + ], + "images": [ + "images/task_070_episode_16_0_cam_high.jpg", + "images/task_070_episode_16_15_cam_high.jpg", + "images/task_070_episode_16_30_cam_high.jpg", + "images/task_070_episode_16_45_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_16", + "recording_id": "task_070_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Both arms are actively moving" + } + ], + "images": [ + "images/task_070_episode_16_1358_cam_high.jpg", + "images/task_070_episode_16_1373_cam_high.jpg", + "images/task_070_episode_16_1388_cam_high.jpg", + "images/task_070_episode_16_1403_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_16", + "recording_id": "task_070_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Only the right arm is actively moving" + } + ], + "images": [ + "images/task_070_episode_16_750_cam_high.jpg", + "images/task_070_episode_16_765_cam_high.jpg", + "images/task_070_episode_16_780_cam_high.jpg", + "images/task_070_episode_16_795_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_16", + "recording_id": "task_070_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_070_episode_16_1143_cam_high.jpg", + "images/task_070_episode_16_1146_cam_high.jpg", + "images/task_070_episode_16_1149_cam_high.jpg", + "images/task_070_episode_16_1152_cam_high.jpg", + "images/task_070_episode_16_1155_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_16", + "recording_id": "task_070_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_070_episode_16_895_cam_high.jpg", + "images/task_070_episode_16_898_cam_high.jpg", + "images/task_070_episode_16_901_cam_high.jpg", + "images/task_070_episode_16_904_cam_high.jpg", + "images/task_070_episode_16_907_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_16", + "recording_id": "task_070_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_070_episode_16_1268_cam_high.jpg", + "images/task_070_episode_16_1271_cam_high.jpg", + "images/task_070_episode_16_1274_cam_high.jpg", + "images/task_070_episode_16_1277_cam_high.jpg", + "images/task_070_episode_16_1280_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_16", + "recording_id": "task_070_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_070_episode_16_841_cam_high.jpg", + "images/task_070_episode_16_844_cam_high.jpg", + "images/task_070_episode_16_847_cam_high.jpg", + "images/task_070_episode_16_850_cam_high.jpg", + "images/task_070_episode_16_853_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_16", + "recording_id": "task_070_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_070_episode_16_895_cam_high.jpg", + "images/task_070_episode_16_898_cam_high.jpg", + "images/task_070_episode_16_901_cam_high.jpg", + "images/task_070_episode_16_904_cam_high.jpg", + "images/task_070_episode_16_907_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_16", + "recording_id": "task_070_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_070_episode_16_237_cam_high.jpg", + "images/task_070_episode_16_240_cam_high.jpg", + "images/task_070_episode_16_243_cam_high.jpg", + "images/task_070_episode_16_246_cam_high.jpg", + "images/task_070_episode_16_249_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_16", + "recording_id": "task_070_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_070_episode_16_859_cam_high.jpg", + "images/task_070_episode_16_862_cam_high.jpg", + "images/task_070_episode_16_865_cam_high.jpg", + "images/task_070_episode_16_868_cam_high.jpg", + "images/task_070_episode_16_871_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_16", + "recording_id": "task_070_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_070_episode_16_1179_cam_high.jpg", + "images/task_070_episode_16_1182_cam_high.jpg", + "images/task_070_episode_16_1185_cam_high.jpg", + "images/task_070_episode_16_1188_cam_high.jpg", + "images/task_070_episode_16_1191_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_16", + "recording_id": "task_070_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Frame X happens before Frame Y" + } + ], + "images": [ + "images/task_070_episode_16_270_cam_high.jpg", + "images/task_070_episode_16_1079_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_16", + "recording_id": "task_070_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Frame X happens before Frame Y" + } + ], + "images": [ + "images/task_070_episode_16_375_cam_high.jpg", + "images/task_070_episode_16_929_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_16", + "recording_id": "task_070_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Frame Y happens before Frame X" + } + ], + "images": [ + "images/task_070_episode_16_1229_cam_high.jpg", + "images/task_070_episode_16_525_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_16", + "recording_id": "task_070_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_070_episode_16_1265_cam_high.jpg", + "images/task_070_episode_16_1107_cam_high.jpg", + "images/task_070_episode_16_1186_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_16", + "recording_id": "task_070_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_070_episode_16_551_cam_high.jpg", + "images/task_070_episode_16_630_cam_high.jpg", + "images/task_070_episode_16_472_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_16", + "recording_id": "task_070_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_070_episode_16_551_cam_high.jpg", + "images/task_070_episode_16_630_cam_high.jpg", + "images/task_070_episode_16_710_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_16", + "recording_id": "task_070_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_070_episode_16_313_cam_high.jpg", + "images/task_070_episode_16_154_cam_high.jpg", + "images/task_070_episode_16_234_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_16", + "recording_id": "task_070_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_070_episode_16_1107_cam_high.jpg", + "images/task_070_episode_16_948_cam_high.jpg", + "images/task_070_episode_16_1027_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_16", + "recording_id": "task_070_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_070_episode_16_392_cam_high.jpg", + "images/task_070_episode_16_313_cam_high.jpg", + "images/task_070_episode_16_234_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_16", + "recording_id": "task_070_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_070_episode_16_869_cam_high.jpg", + "images/task_070_episode_16_1027_cam_high.jpg", + "images/task_070_episode_16_948_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_16", + "recording_id": "task_070_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_070_episode_16_234_cam_high.jpg", + "images/task_070_episode_16_75_cam_high.jpg", + "images/task_070_episode_16_154_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_16", + "recording_id": "task_070_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_070_episode_16_1265_cam_high.jpg", + "images/task_070_episode_16_1345_cam_high.jpg", + "images/task_070_episode_16_1424_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_16", + "recording_id": "task_070_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_070_episode_16_710_cam_high.jpg", + "images/task_070_episode_16_630_cam_high.jpg", + "images/task_070_episode_16_789_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_16", + "recording_id": "task_070_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_070_episode_16_948_cam_high.jpg", + "images/task_070_episode_16_869_cam_high.jpg", + "images/task_070_episode_16_789_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_16", + "recording_id": "task_070_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_070_episode_16_789_cam_high.jpg", + "images/task_070_episode_16_869_cam_high.jpg", + "images/task_070_episode_16_710_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_16", + "recording_id": "task_070_episode_16", + "episode_id": null, + "camera": "cam_high", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Left\nC. Right\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Left" + } + ], + "images": [ + "images/task_070_episode_63_0_cam_high.jpg", + "images/task_070_episode_63_15_cam_high.jpg", + "images/task_070_episode_63_30_cam_high.jpg", + "images/task_070_episode_63_45_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_63", + "recording_id": "task_070_episode_63", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Up\nC. Left\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Left" + } + ], + "images": [ + "images/task_070_episode_63_6_cam_high.jpg", + "images/task_070_episode_63_21_cam_high.jpg", + "images/task_070_episode_63_36_cam_high.jpg", + "images/task_070_episode_63_51_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_63", + "recording_id": "task_070_episode_63", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Up\nC. Down\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Left" + } + ], + "images": [ + "images/task_070_episode_63_12_cam_high.jpg", + "images/task_070_episode_63_27_cam_high.jpg", + "images/task_070_episode_63_42_cam_high.jpg", + "images/task_070_episode_63_57_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_63", + "recording_id": "task_070_episode_63", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Right\nC. Left\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Right" + } + ], + "images": [ + "images/task_070_episode_63_84_cam_high.jpg", + "images/task_070_episode_63_99_cam_high.jpg", + "images/task_070_episode_63_114_cam_high.jpg", + "images/task_070_episode_63_129_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_63", + "recording_id": "task_070_episode_63", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Right\nC. Down\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Right" + } + ], + "images": [ + "images/task_070_episode_63_90_cam_high.jpg", + "images/task_070_episode_63_105_cam_high.jpg", + "images/task_070_episode_63_120_cam_high.jpg", + "images/task_070_episode_63_135_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_63", + "recording_id": "task_070_episode_63", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Left\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Right" + } + ], + "images": [ + "images/task_070_episode_63_96_cam_high.jpg", + "images/task_070_episode_63_111_cam_high.jpg", + "images/task_070_episode_63_126_cam_high.jpg", + "images/task_070_episode_63_141_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_63", + "recording_id": "task_070_episode_63", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Left\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Up" + } + ], + "images": [ + "images/task_070_episode_63_138_cam_high.jpg", + "images/task_070_episode_63_153_cam_high.jpg", + "images/task_070_episode_63_168_cam_high.jpg", + "images/task_070_episode_63_183_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_63", + "recording_id": "task_070_episode_63", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Up\nC. Left\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Up" + } + ], + "images": [ + "images/task_070_episode_63_144_cam_high.jpg", + "images/task_070_episode_63_159_cam_high.jpg", + "images/task_070_episode_63_174_cam_high.jpg", + "images/task_070_episode_63_189_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_63", + "recording_id": "task_070_episode_63", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Right\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Up" + } + ], + "images": [ + "images/task_070_episode_63_150_cam_high.jpg", + "images/task_070_episode_63_165_cam_high.jpg", + "images/task_070_episode_63_180_cam_high.jpg", + "images/task_070_episode_63_195_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_63", + "recording_id": "task_070_episode_63", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Left\nC. Right\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Up" + } + ], + "images": [ + "images/task_070_episode_63_216_cam_high.jpg", + "images/task_070_episode_63_231_cam_high.jpg", + "images/task_070_episode_63_246_cam_high.jpg", + "images/task_070_episode_63_261_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_63", + "recording_id": "task_070_episode_63", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Down\nC. Right\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Up" + } + ], + "images": [ + "images/task_070_episode_63_222_cam_high.jpg", + "images/task_070_episode_63_237_cam_high.jpg", + "images/task_070_episode_63_252_cam_high.jpg", + "images/task_070_episode_63_267_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_63", + "recording_id": "task_070_episode_63", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Down\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Up" + } + ], + "images": [ + "images/task_070_episode_63_228_cam_high.jpg", + "images/task_070_episode_63_243_cam_high.jpg", + "images/task_070_episode_63_258_cam_high.jpg", + "images/task_070_episode_63_273_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_63", + "recording_id": "task_070_episode_63", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Down\nC. Up\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Down" + } + ], + "images": [ + "images/task_070_episode_63_354_cam_high.jpg", + "images/task_070_episode_63_369_cam_high.jpg", + "images/task_070_episode_63_384_cam_high.jpg", + "images/task_070_episode_63_399_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_63", + "recording_id": "task_070_episode_63", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Right\nC. Left\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Down" + } + ], + "images": [ + "images/task_070_episode_63_360_cam_high.jpg", + "images/task_070_episode_63_375_cam_high.jpg", + "images/task_070_episode_63_390_cam_high.jpg", + "images/task_070_episode_63_405_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_63", + "recording_id": "task_070_episode_63", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Down\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Down" + } + ], + "images": [ + "images/task_070_episode_63_366_cam_high.jpg", + "images/task_070_episode_63_381_cam_high.jpg", + "images/task_070_episode_63_396_cam_high.jpg", + "images/task_070_episode_63_411_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_63", + "recording_id": "task_070_episode_63", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Left\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Right" + } + ], + "images": [ + "images/task_070_episode_63_456_cam_high.jpg", + "images/task_070_episode_63_471_cam_high.jpg", + "images/task_070_episode_63_486_cam_high.jpg", + "images/task_070_episode_63_501_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_63", + "recording_id": "task_070_episode_63", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Right" + } + ], + "images": [ + "images/task_070_episode_63_462_cam_high.jpg", + "images/task_070_episode_63_477_cam_high.jpg", + "images/task_070_episode_63_492_cam_high.jpg", + "images/task_070_episode_63_507_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_63", + "recording_id": "task_070_episode_63", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Right" + } + ], + "images": [ + "images/task_070_episode_63_468_cam_high.jpg", + "images/task_070_episode_63_483_cam_high.jpg", + "images/task_070_episode_63_498_cam_high.jpg", + "images/task_070_episode_63_513_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_63", + "recording_id": "task_070_episode_63", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Up\nC. Down\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Down" + } + ], + "images": [ + "images/task_070_episode_63_738_cam_high.jpg", + "images/task_070_episode_63_753_cam_high.jpg", + "images/task_070_episode_63_768_cam_high.jpg", + "images/task_070_episode_63_783_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_63", + "recording_id": "task_070_episode_63", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Left\nC. Down\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Down" + } + ], + "images": [ + "images/task_070_episode_63_1086_cam_high.jpg", + "images/task_070_episode_63_1101_cam_high.jpg", + "images/task_070_episode_63_1116_cam_high.jpg", + "images/task_070_episode_63_1131_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_63", + "recording_id": "task_070_episode_63", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Right\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Down" + } + ], + "images": [ + "images/task_070_episode_63_1092_cam_high.jpg", + "images/task_070_episode_63_1107_cam_high.jpg", + "images/task_070_episode_63_1122_cam_high.jpg", + "images/task_070_episode_63_1137_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_63", + "recording_id": "task_070_episode_63", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Both arms are actively moving" + } + ], + "images": [ + "images/task_070_episode_63_1338_cam_high.jpg", + "images/task_070_episode_63_1353_cam_high.jpg", + "images/task_070_episode_63_1368_cam_high.jpg", + "images/task_070_episode_63_1383_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_63", + "recording_id": "task_070_episode_63", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_070_episode_63_1378_cam_high.jpg", + "images/task_070_episode_63_1393_cam_high.jpg", + "images/task_070_episode_63_1408_cam_high.jpg", + "images/task_070_episode_63_1423_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_63", + "recording_id": "task_070_episode_63", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Only the right arm is actively moving" + } + ], + "images": [ + "images/task_070_episode_63_730_cam_high.jpg", + "images/task_070_episode_63_745_cam_high.jpg", + "images/task_070_episode_63_760_cam_high.jpg", + "images/task_070_episode_63_775_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_63", + "recording_id": "task_070_episode_63", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Only the right arm is actively moving" + } + ], + "images": [ + "images/task_070_episode_63_20_cam_high.jpg", + "images/task_070_episode_63_35_cam_high.jpg", + "images/task_070_episode_63_50_cam_high.jpg", + "images/task_070_episode_63_65_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_63", + "recording_id": "task_070_episode_63", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Only the right arm is actively moving" + } + ], + "images": [ + "images/task_070_episode_63_750_cam_high.jpg", + "images/task_070_episode_63_765_cam_high.jpg", + "images/task_070_episode_63_780_cam_high.jpg", + "images/task_070_episode_63_795_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_63", + "recording_id": "task_070_episode_63", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Only the left arm is actively moving" + } + ], + "images": [ + "images/task_070_episode_63_1358_cam_high.jpg", + "images/task_070_episode_63_1373_cam_high.jpg", + "images/task_070_episode_63_1388_cam_high.jpg", + "images/task_070_episode_63_1403_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_63", + "recording_id": "task_070_episode_63", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Only the left arm is actively moving" + } + ], + "images": [ + "images/task_070_episode_63_831_cam_high.jpg", + "images/task_070_episode_63_846_cam_high.jpg", + "images/task_070_episode_63_861_cam_high.jpg", + "images/task_070_episode_63_876_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_63", + "recording_id": "task_070_episode_63", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_070_episode_63_1176_cam_high.jpg", + "images/task_070_episode_63_1191_cam_high.jpg", + "images/task_070_episode_63_1206_cam_high.jpg", + "images/task_070_episode_63_1221_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_63", + "recording_id": "task_070_episode_63", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_070_episode_63_426_cam_high.jpg", + "images/task_070_episode_63_441_cam_high.jpg", + "images/task_070_episode_63_456_cam_high.jpg", + "images/task_070_episode_63_471_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_63", + "recording_id": "task_070_episode_63", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Both arms are actively moving" + } + ], + "images": [ + "images/task_070_episode_63_1115_cam_high.jpg", + "images/task_070_episode_63_1130_cam_high.jpg", + "images/task_070_episode_63_1145_cam_high.jpg", + "images/task_070_episode_63_1160_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_63", + "recording_id": "task_070_episode_63", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Only the left arm is actively moving" + } + ], + "images": [ + "images/task_070_episode_63_243_cam_high.jpg", + "images/task_070_episode_63_258_cam_high.jpg", + "images/task_070_episode_63_273_cam_high.jpg", + "images/task_070_episode_63_288_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_63", + "recording_id": "task_070_episode_63", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Both arms are actively moving" + } + ], + "images": [ + "images/task_070_episode_63_385_cam_high.jpg", + "images/task_070_episode_63_400_cam_high.jpg", + "images/task_070_episode_63_415_cam_high.jpg", + "images/task_070_episode_63_430_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_63", + "recording_id": "task_070_episode_63", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_070_episode_63_308_cam_high.jpg", + "images/task_070_episode_63_311_cam_high.jpg", + "images/task_070_episode_63_314_cam_high.jpg", + "images/task_070_episode_63_317_cam_high.jpg", + "images/task_070_episode_63_320_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_63", + "recording_id": "task_070_episode_63", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_070_episode_63_592_cam_high.jpg", + "images/task_070_episode_63_595_cam_high.jpg", + "images/task_070_episode_63_598_cam_high.jpg", + "images/task_070_episode_63_601_cam_high.jpg", + "images/task_070_episode_63_604_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_63", + "recording_id": "task_070_episode_63", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_070_episode_63_130_cam_high.jpg", + "images/task_070_episode_63_133_cam_high.jpg", + "images/task_070_episode_63_136_cam_high.jpg", + "images/task_070_episode_63_139_cam_high.jpg", + "images/task_070_episode_63_142_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_63", + "recording_id": "task_070_episode_63", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_070_episode_63_379_cam_high.jpg", + "images/task_070_episode_63_382_cam_high.jpg", + "images/task_070_episode_63_385_cam_high.jpg", + "images/task_070_episode_63_388_cam_high.jpg", + "images/task_070_episode_63_391_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_63", + "recording_id": "task_070_episode_63", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_070_episode_63_859_cam_high.jpg", + "images/task_070_episode_63_862_cam_high.jpg", + "images/task_070_episode_63_865_cam_high.jpg", + "images/task_070_episode_63_868_cam_high.jpg", + "images/task_070_episode_63_871_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_63", + "recording_id": "task_070_episode_63", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_070_episode_63_290_cam_high.jpg", + "images/task_070_episode_63_293_cam_high.jpg", + "images/task_070_episode_63_296_cam_high.jpg", + "images/task_070_episode_63_299_cam_high.jpg", + "images/task_070_episode_63_302_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_63", + "recording_id": "task_070_episode_63", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_070_episode_63_788_cam_high.jpg", + "images/task_070_episode_63_791_cam_high.jpg", + "images/task_070_episode_63_794_cam_high.jpg", + "images/task_070_episode_63_797_cam_high.jpg", + "images/task_070_episode_63_800_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_63", + "recording_id": "task_070_episode_63", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_070_episode_63_610_cam_high.jpg", + "images/task_070_episode_63_613_cam_high.jpg", + "images/task_070_episode_63_616_cam_high.jpg", + "images/task_070_episode_63_619_cam_high.jpg", + "images/task_070_episode_63_622_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_63", + "recording_id": "task_070_episode_63", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Frame X happens before Frame Y" + } + ], + "images": [ + "images/task_070_episode_63_270_cam_high.jpg", + "images/task_070_episode_63_1079_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_63", + "recording_id": "task_070_episode_63", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Frame X happens before Frame Y" + } + ], + "images": [ + "images/task_070_episode_63_375_cam_high.jpg", + "images/task_070_episode_63_929_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_63", + "recording_id": "task_070_episode_63", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Frame X happens before Frame Y" + } + ], + "images": [ + "images/task_070_episode_63_525_cam_high.jpg", + "images/task_070_episode_63_1229_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_63", + "recording_id": "task_070_episode_63", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_070_episode_63_789_cam_high.jpg", + "images/task_070_episode_63_948_cam_high.jpg", + "images/task_070_episode_63_869_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_63", + "recording_id": "task_070_episode_63", + "episode_id": null, + "camera": "cam_high", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_070_episode_63_1345_cam_high.jpg", + "images/task_070_episode_63_1265_cam_high.jpg", + "images/task_070_episode_63_1424_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_63", + "recording_id": "task_070_episode_63", + "episode_id": null, + "camera": "cam_high", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_070_episode_63_948_cam_high.jpg", + "images/task_070_episode_63_1107_cam_high.jpg", + "images/task_070_episode_63_1027_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_63", + "recording_id": "task_070_episode_63", + "episode_id": null, + "camera": "cam_high", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_070_episode_63_789_cam_high.jpg", + "images/task_070_episode_63_710_cam_high.jpg", + "images/task_070_episode_63_869_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_63", + "recording_id": "task_070_episode_63", + "episode_id": null, + "camera": "cam_high", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_070_episode_63_472_cam_high.jpg", + "images/task_070_episode_63_392_cam_high.jpg", + "images/task_070_episode_63_313_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_63", + "recording_id": "task_070_episode_63", + "episode_id": null, + "camera": "cam_high", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_070_episode_63_710_cam_high.jpg", + "images/task_070_episode_63_630_cam_high.jpg", + "images/task_070_episode_63_551_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_63", + "recording_id": "task_070_episode_63", + "episode_id": null, + "camera": "cam_high", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_070_episode_63_869_cam_high.jpg", + "images/task_070_episode_63_948_cam_high.jpg", + "images/task_070_episode_63_1027_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_63", + "recording_id": "task_070_episode_63", + "episode_id": null, + "camera": "cam_high", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_070_episode_63_75_cam_high.jpg", + "images/task_070_episode_63_154_cam_high.jpg", + "images/task_070_episode_63_234_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_63", + "recording_id": "task_070_episode_63", + "episode_id": null, + "camera": "cam_high", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_070_episode_63_472_cam_high.jpg", + "images/task_070_episode_63_630_cam_high.jpg", + "images/task_070_episode_63_551_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_63", + "recording_id": "task_070_episode_63", + "episode_id": null, + "camera": "cam_high", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_070_episode_63_710_cam_high.jpg", + "images/task_070_episode_63_630_cam_high.jpg", + "images/task_070_episode_63_789_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_63", + "recording_id": "task_070_episode_63", + "episode_id": null, + "camera": "cam_high", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_070_episode_63_1027_cam_high.jpg", + "images/task_070_episode_63_1107_cam_high.jpg", + "images/task_070_episode_63_1186_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_63", + "recording_id": "task_070_episode_63", + "episode_id": null, + "camera": "cam_high", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Right\nC. Down\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Left" + } + ], + "images": [ + "images/task_070_episode_73_0_cam_high.jpg", + "images/task_070_episode_73_15_cam_high.jpg", + "images/task_070_episode_73_30_cam_high.jpg", + "images/task_070_episode_73_45_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_73", + "recording_id": "task_070_episode_73", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Left\nC. Up\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Left" + } + ], + "images": [ + "images/task_070_episode_73_6_cam_high.jpg", + "images/task_070_episode_73_21_cam_high.jpg", + "images/task_070_episode_73_36_cam_high.jpg", + "images/task_070_episode_73_51_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_73", + "recording_id": "task_070_episode_73", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Up\nC. Down\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Left" + } + ], + "images": [ + "images/task_070_episode_73_12_cam_high.jpg", + "images/task_070_episode_73_27_cam_high.jpg", + "images/task_070_episode_73_42_cam_high.jpg", + "images/task_070_episode_73_57_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_73", + "recording_id": "task_070_episode_73", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Right" + } + ], + "images": [ + "images/task_070_episode_73_72_cam_high.jpg", + "images/task_070_episode_73_87_cam_high.jpg", + "images/task_070_episode_73_102_cam_high.jpg", + "images/task_070_episode_73_117_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_73", + "recording_id": "task_070_episode_73", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Down\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Right" + } + ], + "images": [ + "images/task_070_episode_73_78_cam_high.jpg", + "images/task_070_episode_73_93_cam_high.jpg", + "images/task_070_episode_73_108_cam_high.jpg", + "images/task_070_episode_73_123_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_73", + "recording_id": "task_070_episode_73", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Left\nC. Right\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Right" + } + ], + "images": [ + "images/task_070_episode_73_84_cam_high.jpg", + "images/task_070_episode_73_99_cam_high.jpg", + "images/task_070_episode_73_114_cam_high.jpg", + "images/task_070_episode_73_129_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_73", + "recording_id": "task_070_episode_73", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Left\nC. Right\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Right" + } + ], + "images": [ + "images/task_070_episode_73_90_cam_high.jpg", + "images/task_070_episode_73_105_cam_high.jpg", + "images/task_070_episode_73_120_cam_high.jpg", + "images/task_070_episode_73_135_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_73", + "recording_id": "task_070_episode_73", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Up\nC. Down\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Right" + } + ], + "images": [ + "images/task_070_episode_73_96_cam_high.jpg", + "images/task_070_episode_73_111_cam_high.jpg", + "images/task_070_episode_73_126_cam_high.jpg", + "images/task_070_episode_73_141_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_73", + "recording_id": "task_070_episode_73", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Up\nC. Down\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Up" + } + ], + "images": [ + "images/task_070_episode_73_120_cam_high.jpg", + "images/task_070_episode_73_135_cam_high.jpg", + "images/task_070_episode_73_150_cam_high.jpg", + "images/task_070_episode_73_165_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_73", + "recording_id": "task_070_episode_73", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Up" + } + ], + "images": [ + "images/task_070_episode_73_126_cam_high.jpg", + "images/task_070_episode_73_141_cam_high.jpg", + "images/task_070_episode_73_156_cam_high.jpg", + "images/task_070_episode_73_171_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_73", + "recording_id": "task_070_episode_73", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Down\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Up" + } + ], + "images": [ + "images/task_070_episode_73_132_cam_high.jpg", + "images/task_070_episode_73_147_cam_high.jpg", + "images/task_070_episode_73_162_cam_high.jpg", + "images/task_070_episode_73_177_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_73", + "recording_id": "task_070_episode_73", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Down\nC. Left\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Down" + } + ], + "images": [ + "images/task_070_episode_73_324_cam_high.jpg", + "images/task_070_episode_73_339_cam_high.jpg", + "images/task_070_episode_73_354_cam_high.jpg", + "images/task_070_episode_73_369_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_73", + "recording_id": "task_070_episode_73", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Right\nC. Left\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Down" + } + ], + "images": [ + "images/task_070_episode_73_330_cam_high.jpg", + "images/task_070_episode_73_345_cam_high.jpg", + "images/task_070_episode_73_360_cam_high.jpg", + "images/task_070_episode_73_375_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_73", + "recording_id": "task_070_episode_73", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Up\nC. Down\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Down" + } + ], + "images": [ + "images/task_070_episode_73_336_cam_high.jpg", + "images/task_070_episode_73_351_cam_high.jpg", + "images/task_070_episode_73_366_cam_high.jpg", + "images/task_070_episode_73_381_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_73", + "recording_id": "task_070_episode_73", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Right" + } + ], + "images": [ + "images/task_070_episode_73_414_cam_high.jpg", + "images/task_070_episode_73_429_cam_high.jpg", + "images/task_070_episode_73_444_cam_high.jpg", + "images/task_070_episode_73_459_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_73", + "recording_id": "task_070_episode_73", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Down\nC. Right\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Down" + } + ], + "images": [ + "images/task_070_episode_73_1056_cam_high.jpg", + "images/task_070_episode_73_1071_cam_high.jpg", + "images/task_070_episode_73_1086_cam_high.jpg", + "images/task_070_episode_73_1101_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_73", + "recording_id": "task_070_episode_73", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Down\nC. Up\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Down" + } + ], + "images": [ + "images/task_070_episode_73_1062_cam_high.jpg", + "images/task_070_episode_73_1077_cam_high.jpg", + "images/task_070_episode_73_1092_cam_high.jpg", + "images/task_070_episode_73_1107_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_73", + "recording_id": "task_070_episode_73", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Left\nC. Up\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Down" + } + ], + "images": [ + "images/task_070_episode_73_1068_cam_high.jpg", + "images/task_070_episode_73_1083_cam_high.jpg", + "images/task_070_episode_73_1098_cam_high.jpg", + "images/task_070_episode_73_1113_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_73", + "recording_id": "task_070_episode_73", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Left\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Up" + } + ], + "images": [ + "images/task_070_episode_73_1098_cam_high.jpg", + "images/task_070_episode_73_1113_cam_high.jpg", + "images/task_070_episode_73_1128_cam_high.jpg", + "images/task_070_episode_73_1143_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_73", + "recording_id": "task_070_episode_73", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Down\nC. Up\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Up" + } + ], + "images": [ + "images/task_070_episode_73_1104_cam_high.jpg", + "images/task_070_episode_73_1119_cam_high.jpg", + "images/task_070_episode_73_1134_cam_high.jpg", + "images/task_070_episode_73_1149_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_73", + "recording_id": "task_070_episode_73", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Up\nC. Down\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Up" + } + ], + "images": [ + "images/task_070_episode_73_1110_cam_high.jpg", + "images/task_070_episode_73_1125_cam_high.jpg", + "images/task_070_episode_73_1140_cam_high.jpg", + "images/task_070_episode_73_1155_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_73", + "recording_id": "task_070_episode_73", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_070_episode_73_1439_cam_high.jpg", + "images/task_070_episode_73_1454_cam_high.jpg", + "images/task_070_episode_73_1469_cam_high.jpg", + "images/task_070_episode_73_1484_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_73", + "recording_id": "task_070_episode_73", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Only the left arm is actively moving" + } + ], + "images": [ + "images/task_070_episode_73_811_cam_high.jpg", + "images/task_070_episode_73_826_cam_high.jpg", + "images/task_070_episode_73_841_cam_high.jpg", + "images/task_070_episode_73_856_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_73", + "recording_id": "task_070_episode_73", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_070_episode_73_1196_cam_high.jpg", + "images/task_070_episode_73_1211_cam_high.jpg", + "images/task_070_episode_73_1226_cam_high.jpg", + "images/task_070_episode_73_1241_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_73", + "recording_id": "task_070_episode_73", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Both arms are actively moving" + } + ], + "images": [ + "images/task_070_episode_73_1317_cam_high.jpg", + "images/task_070_episode_73_1332_cam_high.jpg", + "images/task_070_episode_73_1347_cam_high.jpg", + "images/task_070_episode_73_1362_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_73", + "recording_id": "task_070_episode_73", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_070_episode_73_851_cam_high.jpg", + "images/task_070_episode_73_866_cam_high.jpg", + "images/task_070_episode_73_881_cam_high.jpg", + "images/task_070_episode_73_896_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_73", + "recording_id": "task_070_episode_73", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Only the right arm is actively moving" + } + ], + "images": [ + "images/task_070_episode_73_527_cam_high.jpg", + "images/task_070_episode_73_542_cam_high.jpg", + "images/task_070_episode_73_557_cam_high.jpg", + "images/task_070_episode_73_572_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_73", + "recording_id": "task_070_episode_73", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Only the left arm is actively moving" + } + ], + "images": [ + "images/task_070_episode_73_750_cam_high.jpg", + "images/task_070_episode_73_765_cam_high.jpg", + "images/task_070_episode_73_780_cam_high.jpg", + "images/task_070_episode_73_795_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_73", + "recording_id": "task_070_episode_73", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Both arms are actively moving" + } + ], + "images": [ + "images/task_070_episode_73_1297_cam_high.jpg", + "images/task_070_episode_73_1312_cam_high.jpg", + "images/task_070_episode_73_1327_cam_high.jpg", + "images/task_070_episode_73_1342_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_73", + "recording_id": "task_070_episode_73", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Both arms are actively moving" + } + ], + "images": [ + "images/task_070_episode_73_1277_cam_high.jpg", + "images/task_070_episode_73_1292_cam_high.jpg", + "images/task_070_episode_73_1307_cam_high.jpg", + "images/task_070_episode_73_1322_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_73", + "recording_id": "task_070_episode_73", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Only the right arm is actively moving" + } + ], + "images": [ + "images/task_070_episode_73_547_cam_high.jpg", + "images/task_070_episode_73_562_cam_high.jpg", + "images/task_070_episode_73_577_cam_high.jpg", + "images/task_070_episode_73_592_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_73", + "recording_id": "task_070_episode_73", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Only the left arm is actively moving" + } + ], + "images": [ + "images/task_070_episode_73_790_cam_high.jpg", + "images/task_070_episode_73_805_cam_high.jpg", + "images/task_070_episode_73_820_cam_high.jpg", + "images/task_070_episode_73_835_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_73", + "recording_id": "task_070_episode_73", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Only the right arm is actively moving" + } + ], + "images": [ + "images/task_070_episode_73_608_cam_high.jpg", + "images/task_070_episode_73_623_cam_high.jpg", + "images/task_070_episode_73_638_cam_high.jpg", + "images/task_070_episode_73_653_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_73", + "recording_id": "task_070_episode_73", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_070_episode_73_1268_cam_high.jpg", + "images/task_070_episode_73_1271_cam_high.jpg", + "images/task_070_episode_73_1274_cam_high.jpg", + "images/task_070_episode_73_1277_cam_high.jpg", + "images/task_070_episode_73_1280_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_73", + "recording_id": "task_070_episode_73", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_070_episode_73_664_cam_high.jpg", + "images/task_070_episode_73_667_cam_high.jpg", + "images/task_070_episode_73_670_cam_high.jpg", + "images/task_070_episode_73_673_cam_high.jpg", + "images/task_070_episode_73_676_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_73", + "recording_id": "task_070_episode_73", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_070_episode_73_806_cam_high.jpg", + "images/task_070_episode_73_809_cam_high.jpg", + "images/task_070_episode_73_812_cam_high.jpg", + "images/task_070_episode_73_815_cam_high.jpg", + "images/task_070_episode_73_818_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_73", + "recording_id": "task_070_episode_73", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_070_episode_73_1108_cam_high.jpg", + "images/task_070_episode_73_1111_cam_high.jpg", + "images/task_070_episode_73_1114_cam_high.jpg", + "images/task_070_episode_73_1117_cam_high.jpg", + "images/task_070_episode_73_1120_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_73", + "recording_id": "task_070_episode_73", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_070_episode_73_1303_cam_high.jpg", + "images/task_070_episode_73_1306_cam_high.jpg", + "images/task_070_episode_73_1309_cam_high.jpg", + "images/task_070_episode_73_1312_cam_high.jpg", + "images/task_070_episode_73_1315_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_73", + "recording_id": "task_070_episode_73", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_070_episode_73_1321_cam_high.jpg", + "images/task_070_episode_73_1324_cam_high.jpg", + "images/task_070_episode_73_1327_cam_high.jpg", + "images/task_070_episode_73_1330_cam_high.jpg", + "images/task_070_episode_73_1333_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_73", + "recording_id": "task_070_episode_73", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_070_episode_73_1428_cam_high.jpg", + "images/task_070_episode_73_1431_cam_high.jpg", + "images/task_070_episode_73_1434_cam_high.jpg", + "images/task_070_episode_73_1437_cam_high.jpg", + "images/task_070_episode_73_1440_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_73", + "recording_id": "task_070_episode_73", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_070_episode_73_1463_cam_high.jpg", + "images/task_070_episode_73_1466_cam_high.jpg", + "images/task_070_episode_73_1469_cam_high.jpg", + "images/task_070_episode_73_1472_cam_high.jpg", + "images/task_070_episode_73_1475_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_73", + "recording_id": "task_070_episode_73", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Frame X happens before Frame Y" + } + ], + "images": [ + "images/task_070_episode_73_270_cam_high.jpg", + "images/task_070_episode_73_1079_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_73", + "recording_id": "task_070_episode_73", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Frame X happens before Frame Y" + } + ], + "images": [ + "images/task_070_episode_73_375_cam_high.jpg", + "images/task_070_episode_73_929_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_73", + "recording_id": "task_070_episode_73", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Frame X happens before Frame Y" + } + ], + "images": [ + "images/task_070_episode_73_525_cam_high.jpg", + "images/task_070_episode_73_1229_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_73", + "recording_id": "task_070_episode_73", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_070_episode_73_1107_cam_high.jpg", + "images/task_070_episode_73_1027_cam_high.jpg", + "images/task_070_episode_73_948_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_73", + "recording_id": "task_070_episode_73", + "episode_id": null, + "camera": "cam_high", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_070_episode_73_789_cam_high.jpg", + "images/task_070_episode_73_630_cam_high.jpg", + "images/task_070_episode_73_710_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_73", + "recording_id": "task_070_episode_73", + "episode_id": null, + "camera": "cam_high", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_070_episode_73_1027_cam_high.jpg", + "images/task_070_episode_73_1186_cam_high.jpg", + "images/task_070_episode_73_1107_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_73", + "recording_id": "task_070_episode_73", + "episode_id": null, + "camera": "cam_high", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_070_episode_73_869_cam_high.jpg", + "images/task_070_episode_73_710_cam_high.jpg", + "images/task_070_episode_73_789_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_73", + "recording_id": "task_070_episode_73", + "episode_id": null, + "camera": "cam_high", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_070_episode_73_1186_cam_high.jpg", + "images/task_070_episode_73_1107_cam_high.jpg", + "images/task_070_episode_73_1265_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_73", + "recording_id": "task_070_episode_73", + "episode_id": null, + "camera": "cam_high", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_070_episode_73_1424_cam_high.jpg", + "images/task_070_episode_73_1345_cam_high.jpg", + "images/task_070_episode_73_1265_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_73", + "recording_id": "task_070_episode_73", + "episode_id": null, + "camera": "cam_high", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_070_episode_73_551_cam_high.jpg", + "images/task_070_episode_73_630_cam_high.jpg", + "images/task_070_episode_73_472_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_73", + "recording_id": "task_070_episode_73", + "episode_id": null, + "camera": "cam_high", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_070_episode_73_392_cam_high.jpg", + "images/task_070_episode_73_234_cam_high.jpg", + "images/task_070_episode_73_313_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_73", + "recording_id": "task_070_episode_73", + "episode_id": null, + "camera": "cam_high", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_070_episode_73_789_cam_high.jpg", + "images/task_070_episode_73_869_cam_high.jpg", + "images/task_070_episode_73_948_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_73", + "recording_id": "task_070_episode_73", + "episode_id": null, + "camera": "cam_high", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_070_episode_73_1186_cam_high.jpg", + "images/task_070_episode_73_1265_cam_high.jpg", + "images/task_070_episode_73_1345_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_73", + "recording_id": "task_070_episode_73", + "episode_id": null, + "camera": "cam_high", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_070_episode_73_551_cam_high.jpg", + "images/task_070_episode_73_472_cam_high.jpg", + "images/task_070_episode_73_392_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_73", + "recording_id": "task_070_episode_73", + "episode_id": null, + "camera": "cam_high", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_070_episode_73_75_cam_high.jpg", + "images/task_070_episode_73_154_cam_high.jpg", + "images/task_070_episode_73_234_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_73", + "recording_id": "task_070_episode_73", + "episode_id": null, + "camera": "cam_high", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Down\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Left" + } + ], + "images": [ + "images/task_070_episode_83_0_cam_high.jpg", + "images/task_070_episode_83_15_cam_high.jpg", + "images/task_070_episode_83_30_cam_high.jpg", + "images/task_070_episode_83_45_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_83", + "recording_id": "task_070_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Left" + } + ], + "images": [ + "images/task_070_episode_83_6_cam_high.jpg", + "images/task_070_episode_83_21_cam_high.jpg", + "images/task_070_episode_83_36_cam_high.jpg", + "images/task_070_episode_83_51_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_83", + "recording_id": "task_070_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Right\nC. Left\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Left" + } + ], + "images": [ + "images/task_070_episode_83_12_cam_high.jpg", + "images/task_070_episode_83_27_cam_high.jpg", + "images/task_070_episode_83_42_cam_high.jpg", + "images/task_070_episode_83_57_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_83", + "recording_id": "task_070_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Right\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Right" + } + ], + "images": [ + "images/task_070_episode_83_90_cam_high.jpg", + "images/task_070_episode_83_105_cam_high.jpg", + "images/task_070_episode_83_120_cam_high.jpg", + "images/task_070_episode_83_135_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_83", + "recording_id": "task_070_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Up\nC. Down\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Right" + } + ], + "images": [ + "images/task_070_episode_83_96_cam_high.jpg", + "images/task_070_episode_83_111_cam_high.jpg", + "images/task_070_episode_83_126_cam_high.jpg", + "images/task_070_episode_83_141_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_83", + "recording_id": "task_070_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Down\nC. Up\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Right" + } + ], + "images": [ + "images/task_070_episode_83_102_cam_high.jpg", + "images/task_070_episode_83_117_cam_high.jpg", + "images/task_070_episode_83_132_cam_high.jpg", + "images/task_070_episode_83_147_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_83", + "recording_id": "task_070_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Right\nC. Down\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Up" + } + ], + "images": [ + "images/task_070_episode_83_156_cam_high.jpg", + "images/task_070_episode_83_171_cam_high.jpg", + "images/task_070_episode_83_186_cam_high.jpg", + "images/task_070_episode_83_201_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_83", + "recording_id": "task_070_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Right\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Up" + } + ], + "images": [ + "images/task_070_episode_83_162_cam_high.jpg", + "images/task_070_episode_83_177_cam_high.jpg", + "images/task_070_episode_83_192_cam_high.jpg", + "images/task_070_episode_83_207_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_83", + "recording_id": "task_070_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Right\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Up" + } + ], + "images": [ + "images/task_070_episode_83_168_cam_high.jpg", + "images/task_070_episode_83_183_cam_high.jpg", + "images/task_070_episode_83_198_cam_high.jpg", + "images/task_070_episode_83_213_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_83", + "recording_id": "task_070_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Down\nC. Right\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Up" + } + ], + "images": [ + "images/task_070_episode_83_246_cam_high.jpg", + "images/task_070_episode_83_261_cam_high.jpg", + "images/task_070_episode_83_276_cam_high.jpg", + "images/task_070_episode_83_291_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_83", + "recording_id": "task_070_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Left\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Down" + } + ], + "images": [ + "images/task_070_episode_83_372_cam_high.jpg", + "images/task_070_episode_83_387_cam_high.jpg", + "images/task_070_episode_83_402_cam_high.jpg", + "images/task_070_episode_83_417_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_83", + "recording_id": "task_070_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Left\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Down" + } + ], + "images": [ + "images/task_070_episode_83_378_cam_high.jpg", + "images/task_070_episode_83_393_cam_high.jpg", + "images/task_070_episode_83_408_cam_high.jpg", + "images/task_070_episode_83_423_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_83", + "recording_id": "task_070_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Down\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Down" + } + ], + "images": [ + "images/task_070_episode_83_384_cam_high.jpg", + "images/task_070_episode_83_399_cam_high.jpg", + "images/task_070_episode_83_414_cam_high.jpg", + "images/task_070_episode_83_429_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_83", + "recording_id": "task_070_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Up\nC. Down\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Right" + } + ], + "images": [ + "images/task_070_episode_83_462_cam_high.jpg", + "images/task_070_episode_83_477_cam_high.jpg", + "images/task_070_episode_83_492_cam_high.jpg", + "images/task_070_episode_83_507_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_83", + "recording_id": "task_070_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Right" + } + ], + "images": [ + "images/task_070_episode_83_468_cam_high.jpg", + "images/task_070_episode_83_483_cam_high.jpg", + "images/task_070_episode_83_498_cam_high.jpg", + "images/task_070_episode_83_513_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_83", + "recording_id": "task_070_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Down\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Right" + } + ], + "images": [ + "images/task_070_episode_83_474_cam_high.jpg", + "images/task_070_episode_83_489_cam_high.jpg", + "images/task_070_episode_83_504_cam_high.jpg", + "images/task_070_episode_83_519_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_83", + "recording_id": "task_070_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Down\nC. Left\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Down" + } + ], + "images": [ + "images/task_070_episode_83_1104_cam_high.jpg", + "images/task_070_episode_83_1119_cam_high.jpg", + "images/task_070_episode_83_1134_cam_high.jpg", + "images/task_070_episode_83_1149_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_83", + "recording_id": "task_070_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Down\nC. Right\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Down" + } + ], + "images": [ + "images/task_070_episode_83_1110_cam_high.jpg", + "images/task_070_episode_83_1125_cam_high.jpg", + "images/task_070_episode_83_1140_cam_high.jpg", + "images/task_070_episode_83_1155_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_83", + "recording_id": "task_070_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Down\nC. Right\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Down" + } + ], + "images": [ + "images/task_070_episode_83_1116_cam_high.jpg", + "images/task_070_episode_83_1131_cam_high.jpg", + "images/task_070_episode_83_1146_cam_high.jpg", + "images/task_070_episode_83_1161_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_83", + "recording_id": "task_070_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Only the right arm is actively moving" + } + ], + "images": [ + "images/task_070_episode_83_20_cam_high.jpg", + "images/task_070_episode_83_35_cam_high.jpg", + "images/task_070_episode_83_50_cam_high.jpg", + "images/task_070_episode_83_65_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_83", + "recording_id": "task_070_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_070_episode_83_1236_cam_high.jpg", + "images/task_070_episode_83_1251_cam_high.jpg", + "images/task_070_episode_83_1266_cam_high.jpg", + "images/task_070_episode_83_1281_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_83", + "recording_id": "task_070_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_070_episode_83_912_cam_high.jpg", + "images/task_070_episode_83_927_cam_high.jpg", + "images/task_070_episode_83_942_cam_high.jpg", + "images/task_070_episode_83_957_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_83", + "recording_id": "task_070_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_070_episode_83_426_cam_high.jpg", + "images/task_070_episode_83_441_cam_high.jpg", + "images/task_070_episode_83_456_cam_high.jpg", + "images/task_070_episode_83_471_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_83", + "recording_id": "task_070_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Both arms are actively moving" + } + ], + "images": [ + "images/task_070_episode_83_1135_cam_high.jpg", + "images/task_070_episode_83_1150_cam_high.jpg", + "images/task_070_episode_83_1165_cam_high.jpg", + "images/task_070_episode_83_1180_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_83", + "recording_id": "task_070_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Both arms are actively moving" + } + ], + "images": [ + "images/task_070_episode_83_1398_cam_high.jpg", + "images/task_070_episode_83_1413_cam_high.jpg", + "images/task_070_episode_83_1428_cam_high.jpg", + "images/task_070_episode_83_1443_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_83", + "recording_id": "task_070_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Only the left arm is actively moving" + } + ], + "images": [ + "images/task_070_episode_83_993_cam_high.jpg", + "images/task_070_episode_83_1008_cam_high.jpg", + "images/task_070_episode_83_1023_cam_high.jpg", + "images/task_070_episode_83_1038_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_83", + "recording_id": "task_070_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Only the right arm is actively moving" + } + ], + "images": [ + "images/task_070_episode_83_203_cam_high.jpg", + "images/task_070_episode_83_218_cam_high.jpg", + "images/task_070_episode_83_233_cam_high.jpg", + "images/task_070_episode_83_248_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_83", + "recording_id": "task_070_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Both arms are actively moving" + } + ], + "images": [ + "images/task_070_episode_83_1378_cam_high.jpg", + "images/task_070_episode_83_1393_cam_high.jpg", + "images/task_070_episode_83_1408_cam_high.jpg", + "images/task_070_episode_83_1423_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_83", + "recording_id": "task_070_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Only the right arm is actively moving" + } + ], + "images": [ + "images/task_070_episode_83_41_cam_high.jpg", + "images/task_070_episode_83_56_cam_high.jpg", + "images/task_070_episode_83_71_cam_high.jpg", + "images/task_070_episode_83_86_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_83", + "recording_id": "task_070_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Only the left arm is actively moving" + } + ], + "images": [ + "images/task_070_episode_83_953_cam_high.jpg", + "images/task_070_episode_83_968_cam_high.jpg", + "images/task_070_episode_83_983_cam_high.jpg", + "images/task_070_episode_83_998_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_83", + "recording_id": "task_070_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Only the left arm is actively moving" + } + ], + "images": [ + "images/task_070_episode_83_973_cam_high.jpg", + "images/task_070_episode_83_988_cam_high.jpg", + "images/task_070_episode_83_1003_cam_high.jpg", + "images/task_070_episode_83_1018_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_83", + "recording_id": "task_070_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_070_episode_83_24_cam_high.jpg", + "images/task_070_episode_83_27_cam_high.jpg", + "images/task_070_episode_83_30_cam_high.jpg", + "images/task_070_episode_83_33_cam_high.jpg", + "images/task_070_episode_83_36_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_83", + "recording_id": "task_070_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_070_episode_83_841_cam_high.jpg", + "images/task_070_episode_83_844_cam_high.jpg", + "images/task_070_episode_83_847_cam_high.jpg", + "images/task_070_episode_83_850_cam_high.jpg", + "images/task_070_episode_83_853_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_83", + "recording_id": "task_070_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_070_episode_83_681_cam_high.jpg", + "images/task_070_episode_83_684_cam_high.jpg", + "images/task_070_episode_83_687_cam_high.jpg", + "images/task_070_episode_83_690_cam_high.jpg", + "images/task_070_episode_83_693_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_83", + "recording_id": "task_070_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_070_episode_83_1161_cam_high.jpg", + "images/task_070_episode_83_1164_cam_high.jpg", + "images/task_070_episode_83_1167_cam_high.jpg", + "images/task_070_episode_83_1170_cam_high.jpg", + "images/task_070_episode_83_1173_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_83", + "recording_id": "task_070_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_070_episode_83_308_cam_high.jpg", + "images/task_070_episode_83_311_cam_high.jpg", + "images/task_070_episode_83_314_cam_high.jpg", + "images/task_070_episode_83_317_cam_high.jpg", + "images/task_070_episode_83_320_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_83", + "recording_id": "task_070_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_070_episode_83_1481_cam_high.jpg", + "images/task_070_episode_83_1484_cam_high.jpg", + "images/task_070_episode_83_1487_cam_high.jpg", + "images/task_070_episode_83_1490_cam_high.jpg", + "images/task_070_episode_83_1493_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_83", + "recording_id": "task_070_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_070_episode_83_184_cam_high.jpg", + "images/task_070_episode_83_187_cam_high.jpg", + "images/task_070_episode_83_190_cam_high.jpg", + "images/task_070_episode_83_193_cam_high.jpg", + "images/task_070_episode_83_196_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_83", + "recording_id": "task_070_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_070_episode_83_237_cam_high.jpg", + "images/task_070_episode_83_240_cam_high.jpg", + "images/task_070_episode_83_243_cam_high.jpg", + "images/task_070_episode_83_246_cam_high.jpg", + "images/task_070_episode_83_249_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_83", + "recording_id": "task_070_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Frame Y happens before Frame X" + } + ], + "images": [ + "images/task_070_episode_83_1079_cam_high.jpg", + "images/task_070_episode_83_270_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_83", + "recording_id": "task_070_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Frame Y happens before Frame X" + } + ], + "images": [ + "images/task_070_episode_83_929_cam_high.jpg", + "images/task_070_episode_83_375_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_83", + "recording_id": "task_070_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Frame Y happens before Frame X" + } + ], + "images": [ + "images/task_070_episode_83_1229_cam_high.jpg", + "images/task_070_episode_83_525_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_83", + "recording_id": "task_070_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_070_episode_83_948_cam_high.jpg", + "images/task_070_episode_83_1027_cam_high.jpg", + "images/task_070_episode_83_869_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_83", + "recording_id": "task_070_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_070_episode_83_1186_cam_high.jpg", + "images/task_070_episode_83_1107_cam_high.jpg", + "images/task_070_episode_83_1027_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_83", + "recording_id": "task_070_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_070_episode_83_1265_cam_high.jpg", + "images/task_070_episode_83_1186_cam_high.jpg", + "images/task_070_episode_83_1345_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_83", + "recording_id": "task_070_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_070_episode_83_472_cam_high.jpg", + "images/task_070_episode_83_630_cam_high.jpg", + "images/task_070_episode_83_551_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_83", + "recording_id": "task_070_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_070_episode_83_710_cam_high.jpg", + "images/task_070_episode_83_630_cam_high.jpg", + "images/task_070_episode_83_789_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_83", + "recording_id": "task_070_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_070_episode_83_1186_cam_high.jpg", + "images/task_070_episode_83_1107_cam_high.jpg", + "images/task_070_episode_83_1265_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_83", + "recording_id": "task_070_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_070_episode_83_234_cam_high.jpg", + "images/task_070_episode_83_154_cam_high.jpg", + "images/task_070_episode_83_313_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_83", + "recording_id": "task_070_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_070_episode_83_392_cam_high.jpg", + "images/task_070_episode_83_472_cam_high.jpg", + "images/task_070_episode_83_313_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_83", + "recording_id": "task_070_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_070_episode_83_948_cam_high.jpg", + "images/task_070_episode_83_1027_cam_high.jpg", + "images/task_070_episode_83_1107_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_83", + "recording_id": "task_070_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_070_episode_83_710_cam_high.jpg", + "images/task_070_episode_83_869_cam_high.jpg", + "images/task_070_episode_83_789_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_83", + "recording_id": "task_070_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_070_episode_83_551_cam_high.jpg", + "images/task_070_episode_83_710_cam_high.jpg", + "images/task_070_episode_83_630_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_83", + "recording_id": "task_070_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_070_episode_83_789_cam_high.jpg", + "images/task_070_episode_83_869_cam_high.jpg", + "images/task_070_episode_83_948_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_83", + "recording_id": "task_070_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_070_episode_83_234_cam_high.jpg", + "images/task_070_episode_83_392_cam_high.jpg", + "images/task_070_episode_83_313_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_070", + "split": "eval", + "split_group_id": "task_070_episode_83", + "recording_id": "task_070_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Down\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Left" + } + ], + "images": [ + "images/task_084_episode_0_0_cam_high.jpg", + "images/task_084_episode_0_15_cam_high.jpg", + "images/task_084_episode_0_30_cam_high.jpg", + "images/task_084_episode_0_45_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_0", + "recording_id": "task_084_episode_0", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Right\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Up" + } + ], + "images": [ + "images/task_084_episode_0_6_cam_high.jpg", + "images/task_084_episode_0_21_cam_high.jpg", + "images/task_084_episode_0_36_cam_high.jpg", + "images/task_084_episode_0_51_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_0", + "recording_id": "task_084_episode_0", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Up\nC. Right\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Left" + } + ], + "images": [ + "images/task_084_episode_0_6_cam_high.jpg", + "images/task_084_episode_0_21_cam_high.jpg", + "images/task_084_episode_0_36_cam_high.jpg", + "images/task_084_episode_0_51_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_0", + "recording_id": "task_084_episode_0", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Down\nC. Up\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Up" + } + ], + "images": [ + "images/task_084_episode_0_12_cam_high.jpg", + "images/task_084_episode_0_27_cam_high.jpg", + "images/task_084_episode_0_42_cam_high.jpg", + "images/task_084_episode_0_57_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_0", + "recording_id": "task_084_episode_0", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Down\nC. Left\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Left" + } + ], + "images": [ + "images/task_084_episode_0_12_cam_high.jpg", + "images/task_084_episode_0_27_cam_high.jpg", + "images/task_084_episode_0_42_cam_high.jpg", + "images/task_084_episode_0_57_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_0", + "recording_id": "task_084_episode_0", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Down\nC. Right\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Up" + } + ], + "images": [ + "images/task_084_episode_0_18_cam_high.jpg", + "images/task_084_episode_0_33_cam_high.jpg", + "images/task_084_episode_0_48_cam_high.jpg", + "images/task_084_episode_0_63_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_0", + "recording_id": "task_084_episode_0", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Right" + } + ], + "images": [ + "images/task_084_episode_0_84_cam_high.jpg", + "images/task_084_episode_0_99_cam_high.jpg", + "images/task_084_episode_0_114_cam_high.jpg", + "images/task_084_episode_0_129_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_0", + "recording_id": "task_084_episode_0", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Up\nC. Down\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Right" + } + ], + "images": [ + "images/task_084_episode_0_90_cam_high.jpg", + "images/task_084_episode_0_105_cam_high.jpg", + "images/task_084_episode_0_120_cam_high.jpg", + "images/task_084_episode_0_135_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_0", + "recording_id": "task_084_episode_0", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Left\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Right" + } + ], + "images": [ + "images/task_084_episode_0_96_cam_high.jpg", + "images/task_084_episode_0_111_cam_high.jpg", + "images/task_084_episode_0_126_cam_high.jpg", + "images/task_084_episode_0_141_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_0", + "recording_id": "task_084_episode_0", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Down\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Up" + } + ], + "images": [ + "images/task_084_episode_0_126_cam_high.jpg", + "images/task_084_episode_0_141_cam_high.jpg", + "images/task_084_episode_0_156_cam_high.jpg", + "images/task_084_episode_0_171_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_0", + "recording_id": "task_084_episode_0", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Left\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Up" + } + ], + "images": [ + "images/task_084_episode_0_132_cam_high.jpg", + "images/task_084_episode_0_147_cam_high.jpg", + "images/task_084_episode_0_162_cam_high.jpg", + "images/task_084_episode_0_177_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_0", + "recording_id": "task_084_episode_0", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Left\nC. Right\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Up" + } + ], + "images": [ + "images/task_084_episode_0_138_cam_high.jpg", + "images/task_084_episode_0_153_cam_high.jpg", + "images/task_084_episode_0_168_cam_high.jpg", + "images/task_084_episode_0_183_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_0", + "recording_id": "task_084_episode_0", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Right\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Right" + } + ], + "images": [ + "images/task_084_episode_0_222_cam_high.jpg", + "images/task_084_episode_0_237_cam_high.jpg", + "images/task_084_episode_0_252_cam_high.jpg", + "images/task_084_episode_0_267_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_0", + "recording_id": "task_084_episode_0", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Up\nC. Down\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Right" + } + ], + "images": [ + "images/task_084_episode_0_228_cam_high.jpg", + "images/task_084_episode_0_243_cam_high.jpg", + "images/task_084_episode_0_258_cam_high.jpg", + "images/task_084_episode_0_273_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_0", + "recording_id": "task_084_episode_0", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Up\nC. Left\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Right" + } + ], + "images": [ + "images/task_084_episode_0_234_cam_high.jpg", + "images/task_084_episode_0_249_cam_high.jpg", + "images/task_084_episode_0_264_cam_high.jpg", + "images/task_084_episode_0_279_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_0", + "recording_id": "task_084_episode_0", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Right\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Down" + } + ], + "images": [ + "images/task_084_episode_0_276_cam_high.jpg", + "images/task_084_episode_0_291_cam_high.jpg", + "images/task_084_episode_0_306_cam_high.jpg", + "images/task_084_episode_0_321_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_0", + "recording_id": "task_084_episode_0", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Down\nC. Left\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Down" + } + ], + "images": [ + "images/task_084_episode_0_276_cam_high.jpg", + "images/task_084_episode_0_291_cam_high.jpg", + "images/task_084_episode_0_306_cam_high.jpg", + "images/task_084_episode_0_321_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_0", + "recording_id": "task_084_episode_0", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Down\nC. Left\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Down" + } + ], + "images": [ + "images/task_084_episode_0_282_cam_high.jpg", + "images/task_084_episode_0_297_cam_high.jpg", + "images/task_084_episode_0_312_cam_high.jpg", + "images/task_084_episode_0_327_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_0", + "recording_id": "task_084_episode_0", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Right\nC. Left\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Down" + } + ], + "images": [ + "images/task_084_episode_0_282_cam_high.jpg", + "images/task_084_episode_0_297_cam_high.jpg", + "images/task_084_episode_0_312_cam_high.jpg", + "images/task_084_episode_0_327_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_0", + "recording_id": "task_084_episode_0", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Up\nC. Down\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Down" + } + ], + "images": [ + "images/task_084_episode_0_288_cam_high.jpg", + "images/task_084_episode_0_303_cam_high.jpg", + "images/task_084_episode_0_318_cam_high.jpg", + "images/task_084_episode_0_333_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_0", + "recording_id": "task_084_episode_0", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Down\nC. Right\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Down" + } + ], + "images": [ + "images/task_084_episode_0_288_cam_high.jpg", + "images/task_084_episode_0_303_cam_high.jpg", + "images/task_084_episode_0_318_cam_high.jpg", + "images/task_084_episode_0_333_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_0", + "recording_id": "task_084_episode_0", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Only the right arm is actively moving" + } + ], + "images": [ + "images/task_084_episode_0_31_cam_high.jpg", + "images/task_084_episode_0_46_cam_high.jpg", + "images/task_084_episode_0_61_cam_high.jpg", + "images/task_084_episode_0_76_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_0", + "recording_id": "task_084_episode_0", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Both arms are actively moving" + } + ], + "images": [ + "images/task_084_episode_0_322_cam_high.jpg", + "images/task_084_episode_0_337_cam_high.jpg", + "images/task_084_episode_0_352_cam_high.jpg", + "images/task_084_episode_0_367_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_0", + "recording_id": "task_084_episode_0", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_084_episode_0_111_cam_high.jpg", + "images/task_084_episode_0_126_cam_high.jpg", + "images/task_084_episode_0_141_cam_high.jpg", + "images/task_084_episode_0_156_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_0", + "recording_id": "task_084_episode_0", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_084_episode_0_427_cam_high.jpg", + "images/task_084_episode_0_442_cam_high.jpg", + "images/task_084_episode_0_457_cam_high.jpg", + "images/task_084_episode_0_472_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_0", + "recording_id": "task_084_episode_0", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Only the right arm is actively moving" + } + ], + "images": [ + "images/task_084_episode_0_365_cam_high.jpg", + "images/task_084_episode_0_380_cam_high.jpg", + "images/task_084_episode_0_395_cam_high.jpg", + "images/task_084_episode_0_410_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_0", + "recording_id": "task_084_episode_0", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_084_episode_0_80_cam_high.jpg", + "images/task_084_episode_0_95_cam_high.jpg", + "images/task_084_episode_0_110_cam_high.jpg", + "images/task_084_episode_0_125_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_0", + "recording_id": "task_084_episode_0", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Both arms are actively moving" + } + ], + "images": [ + "images/task_084_episode_0_315_cam_high.jpg", + "images/task_084_episode_0_330_cam_high.jpg", + "images/task_084_episode_0_345_cam_high.jpg", + "images/task_084_episode_0_360_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_0", + "recording_id": "task_084_episode_0", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Only the left arm is actively moving" + } + ], + "images": [ + "images/task_084_episode_0_266_cam_high.jpg", + "images/task_084_episode_0_281_cam_high.jpg", + "images/task_084_episode_0_296_cam_high.jpg", + "images/task_084_episode_0_311_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_0", + "recording_id": "task_084_episode_0", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Both arms are actively moving" + } + ], + "images": [ + "images/task_084_episode_0_309_cam_high.jpg", + "images/task_084_episode_0_324_cam_high.jpg", + "images/task_084_episode_0_339_cam_high.jpg", + "images/task_084_episode_0_354_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_0", + "recording_id": "task_084_episode_0", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Only the right arm is actively moving" + } + ], + "images": [ + "images/task_084_episode_0_352_cam_high.jpg", + "images/task_084_episode_0_367_cam_high.jpg", + "images/task_084_episode_0_382_cam_high.jpg", + "images/task_084_episode_0_397_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_0", + "recording_id": "task_084_episode_0", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Only the left arm is actively moving" + } + ], + "images": [ + "images/task_084_episode_0_260_cam_high.jpg", + "images/task_084_episode_0_275_cam_high.jpg", + "images/task_084_episode_0_290_cam_high.jpg", + "images/task_084_episode_0_305_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_0", + "recording_id": "task_084_episode_0", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_084_episode_0_447_cam_high.jpg", + "images/task_084_episode_0_450_cam_high.jpg", + "images/task_084_episode_0_453_cam_high.jpg", + "images/task_084_episode_0_456_cam_high.jpg", + "images/task_084_episode_0_459_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_0", + "recording_id": "task_084_episode_0", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_084_episode_0_395_cam_high.jpg", + "images/task_084_episode_0_398_cam_high.jpg", + "images/task_084_episode_0_401_cam_high.jpg", + "images/task_084_episode_0_404_cam_high.jpg", + "images/task_084_episode_0_407_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_0", + "recording_id": "task_084_episode_0", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_084_episode_0_75_cam_high.jpg", + "images/task_084_episode_0_78_cam_high.jpg", + "images/task_084_episode_0_81_cam_high.jpg", + "images/task_084_episode_0_84_cam_high.jpg", + "images/task_084_episode_0_87_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_0", + "recording_id": "task_084_episode_0", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_084_episode_0_35_cam_high.jpg", + "images/task_084_episode_0_38_cam_high.jpg", + "images/task_084_episode_0_41_cam_high.jpg", + "images/task_084_episode_0_44_cam_high.jpg", + "images/task_084_episode_0_47_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_0", + "recording_id": "task_084_episode_0", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_084_episode_0_470_cam_high.jpg", + "images/task_084_episode_0_473_cam_high.jpg", + "images/task_084_episode_0_476_cam_high.jpg", + "images/task_084_episode_0_479_cam_high.jpg", + "images/task_084_episode_0_482_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_0", + "recording_id": "task_084_episode_0", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_084_episode_0_407_cam_high.jpg", + "images/task_084_episode_0_410_cam_high.jpg", + "images/task_084_episode_0_413_cam_high.jpg", + "images/task_084_episode_0_416_cam_high.jpg", + "images/task_084_episode_0_419_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_0", + "recording_id": "task_084_episode_0", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_084_episode_0_361_cam_high.jpg", + "images/task_084_episode_0_364_cam_high.jpg", + "images/task_084_episode_0_367_cam_high.jpg", + "images/task_084_episode_0_370_cam_high.jpg", + "images/task_084_episode_0_373_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_0", + "recording_id": "task_084_episode_0", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_084_episode_0_372_cam_high.jpg", + "images/task_084_episode_0_375_cam_high.jpg", + "images/task_084_episode_0_378_cam_high.jpg", + "images/task_084_episode_0_381_cam_high.jpg", + "images/task_084_episode_0_384_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_0", + "recording_id": "task_084_episode_0", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Frame X happens before Frame Y" + } + ], + "images": [ + "images/task_084_episode_0_90_cam_high.jpg", + "images/task_084_episode_0_359_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_0", + "recording_id": "task_084_episode_0", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Frame Y happens before Frame X" + } + ], + "images": [ + "images/task_084_episode_0_309_cam_high.jpg", + "images/task_084_episode_0_125_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_0", + "recording_id": "task_084_episode_0", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Frame X happens before Frame Y" + } + ], + "images": [ + "images/task_084_episode_0_175_cam_high.jpg", + "images/task_084_episode_0_409_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_0", + "recording_id": "task_084_episode_0", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_084_episode_0_395_cam_high.jpg", + "images/task_084_episode_0_368_cam_high.jpg", + "images/task_084_episode_0_421_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_0", + "recording_id": "task_084_episode_0", + "episode_id": null, + "camera": "cam_high", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_084_episode_0_342_cam_high.jpg", + "images/task_084_episode_0_316_cam_high.jpg", + "images/task_084_episode_0_368_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_0", + "recording_id": "task_084_episode_0", + "episode_id": null, + "camera": "cam_high", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_084_episode_0_51_cam_high.jpg", + "images/task_084_episode_0_104_cam_high.jpg", + "images/task_084_episode_0_78_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_0", + "recording_id": "task_084_episode_0", + "episode_id": null, + "camera": "cam_high", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_084_episode_0_157_cam_high.jpg", + "images/task_084_episode_0_131_cam_high.jpg", + "images/task_084_episode_0_183_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_0", + "recording_id": "task_084_episode_0", + "episode_id": null, + "camera": "cam_high", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_084_episode_0_157_cam_high.jpg", + "images/task_084_episode_0_131_cam_high.jpg", + "images/task_084_episode_0_104_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_0", + "recording_id": "task_084_episode_0", + "episode_id": null, + "camera": "cam_high", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_084_episode_0_157_cam_high.jpg", + "images/task_084_episode_0_183_cam_high.jpg", + "images/task_084_episode_0_210_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_0", + "recording_id": "task_084_episode_0", + "episode_id": null, + "camera": "cam_high", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_084_episode_0_289_cam_high.jpg", + "images/task_084_episode_0_263_cam_high.jpg", + "images/task_084_episode_0_236_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_0", + "recording_id": "task_084_episode_0", + "episode_id": null, + "camera": "cam_high", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_084_episode_0_78_cam_high.jpg", + "images/task_084_episode_0_25_cam_high.jpg", + "images/task_084_episode_0_51_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_0", + "recording_id": "task_084_episode_0", + "episode_id": null, + "camera": "cam_high", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_084_episode_0_448_cam_high.jpg", + "images/task_084_episode_0_421_cam_high.jpg", + "images/task_084_episode_0_474_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_0", + "recording_id": "task_084_episode_0", + "episode_id": null, + "camera": "cam_high", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_084_episode_0_421_cam_high.jpg", + "images/task_084_episode_0_395_cam_high.jpg", + "images/task_084_episode_0_448_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_0", + "recording_id": "task_084_episode_0", + "episode_id": null, + "camera": "cam_high", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_084_episode_0_289_cam_high.jpg", + "images/task_084_episode_0_316_cam_high.jpg", + "images/task_084_episode_0_342_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_0", + "recording_id": "task_084_episode_0", + "episode_id": null, + "camera": "cam_high", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_084_episode_0_236_cam_high.jpg", + "images/task_084_episode_0_210_cam_high.jpg", + "images/task_084_episode_0_183_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_0", + "recording_id": "task_084_episode_0", + "episode_id": null, + "camera": "cam_high", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_084_episode_0_236_cam_high.jpg", + "images/task_084_episode_0_263_cam_high.jpg", + "images/task_084_episode_0_210_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_0", + "recording_id": "task_084_episode_0", + "episode_id": null, + "camera": "cam_high", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Up\nC. Right\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Up" + } + ], + "images": [ + "images/task_084_episode_10_0_cam_high.jpg", + "images/task_084_episode_10_15_cam_high.jpg", + "images/task_084_episode_10_30_cam_high.jpg", + "images/task_084_episode_10_45_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_10", + "recording_id": "task_084_episode_10", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Right\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Up" + } + ], + "images": [ + "images/task_084_episode_10_6_cam_high.jpg", + "images/task_084_episode_10_21_cam_high.jpg", + "images/task_084_episode_10_36_cam_high.jpg", + "images/task_084_episode_10_51_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_10", + "recording_id": "task_084_episode_10", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Right\nC. Down\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Left" + } + ], + "images": [ + "images/task_084_episode_10_6_cam_high.jpg", + "images/task_084_episode_10_21_cam_high.jpg", + "images/task_084_episode_10_36_cam_high.jpg", + "images/task_084_episode_10_51_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_10", + "recording_id": "task_084_episode_10", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Left\nC. Down\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Left" + } + ], + "images": [ + "images/task_084_episode_10_12_cam_high.jpg", + "images/task_084_episode_10_27_cam_high.jpg", + "images/task_084_episode_10_42_cam_high.jpg", + "images/task_084_episode_10_57_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_10", + "recording_id": "task_084_episode_10", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Right\nC. Left\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Left" + } + ], + "images": [ + "images/task_084_episode_10_18_cam_high.jpg", + "images/task_084_episode_10_33_cam_high.jpg", + "images/task_084_episode_10_48_cam_high.jpg", + "images/task_084_episode_10_63_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_10", + "recording_id": "task_084_episode_10", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Right" + } + ], + "images": [ + "images/task_084_episode_10_72_cam_high.jpg", + "images/task_084_episode_10_87_cam_high.jpg", + "images/task_084_episode_10_102_cam_high.jpg", + "images/task_084_episode_10_117_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_10", + "recording_id": "task_084_episode_10", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Right\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Right" + } + ], + "images": [ + "images/task_084_episode_10_78_cam_high.jpg", + "images/task_084_episode_10_93_cam_high.jpg", + "images/task_084_episode_10_108_cam_high.jpg", + "images/task_084_episode_10_123_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_10", + "recording_id": "task_084_episode_10", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Right" + } + ], + "images": [ + "images/task_084_episode_10_84_cam_high.jpg", + "images/task_084_episode_10_99_cam_high.jpg", + "images/task_084_episode_10_114_cam_high.jpg", + "images/task_084_episode_10_129_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_10", + "recording_id": "task_084_episode_10", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Right\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Right" + } + ], + "images": [ + "images/task_084_episode_10_84_cam_high.jpg", + "images/task_084_episode_10_99_cam_high.jpg", + "images/task_084_episode_10_114_cam_high.jpg", + "images/task_084_episode_10_129_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_10", + "recording_id": "task_084_episode_10", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Up\nC. Down\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Right" + } + ], + "images": [ + "images/task_084_episode_10_90_cam_high.jpg", + "images/task_084_episode_10_105_cam_high.jpg", + "images/task_084_episode_10_120_cam_high.jpg", + "images/task_084_episode_10_135_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_10", + "recording_id": "task_084_episode_10", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Left\nC. Down\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Right" + } + ], + "images": [ + "images/task_084_episode_10_96_cam_high.jpg", + "images/task_084_episode_10_111_cam_high.jpg", + "images/task_084_episode_10_126_cam_high.jpg", + "images/task_084_episode_10_141_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_10", + "recording_id": "task_084_episode_10", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Left\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Up" + } + ], + "images": [ + "images/task_084_episode_10_126_cam_high.jpg", + "images/task_084_episode_10_141_cam_high.jpg", + "images/task_084_episode_10_156_cam_high.jpg", + "images/task_084_episode_10_171_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_10", + "recording_id": "task_084_episode_10", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Up\nC. Right\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Up" + } + ], + "images": [ + "images/task_084_episode_10_132_cam_high.jpg", + "images/task_084_episode_10_147_cam_high.jpg", + "images/task_084_episode_10_162_cam_high.jpg", + "images/task_084_episode_10_177_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_10", + "recording_id": "task_084_episode_10", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Right\nC. Down\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Up" + } + ], + "images": [ + "images/task_084_episode_10_138_cam_high.jpg", + "images/task_084_episode_10_153_cam_high.jpg", + "images/task_084_episode_10_168_cam_high.jpg", + "images/task_084_episode_10_183_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_10", + "recording_id": "task_084_episode_10", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Up\nC. Right\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Up" + } + ], + "images": [ + "images/task_084_episode_10_144_cam_high.jpg", + "images/task_084_episode_10_159_cam_high.jpg", + "images/task_084_episode_10_174_cam_high.jpg", + "images/task_084_episode_10_189_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_10", + "recording_id": "task_084_episode_10", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Left\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Down" + } + ], + "images": [ + "images/task_084_episode_10_282_cam_high.jpg", + "images/task_084_episode_10_297_cam_high.jpg", + "images/task_084_episode_10_312_cam_high.jpg", + "images/task_084_episode_10_327_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_10", + "recording_id": "task_084_episode_10", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Right\nC. Left\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Down" + } + ], + "images": [ + "images/task_084_episode_10_288_cam_high.jpg", + "images/task_084_episode_10_303_cam_high.jpg", + "images/task_084_episode_10_318_cam_high.jpg", + "images/task_084_episode_10_333_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_10", + "recording_id": "task_084_episode_10", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Left\nC. Down\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Down" + } + ], + "images": [ + "images/task_084_episode_10_288_cam_high.jpg", + "images/task_084_episode_10_303_cam_high.jpg", + "images/task_084_episode_10_318_cam_high.jpg", + "images/task_084_episode_10_333_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_10", + "recording_id": "task_084_episode_10", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Right\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Down" + } + ], + "images": [ + "images/task_084_episode_10_294_cam_high.jpg", + "images/task_084_episode_10_309_cam_high.jpg", + "images/task_084_episode_10_324_cam_high.jpg", + "images/task_084_episode_10_339_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_10", + "recording_id": "task_084_episode_10", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Left\nC. Down\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Down" + } + ], + "images": [ + "images/task_084_episode_10_294_cam_high.jpg", + "images/task_084_episode_10_309_cam_high.jpg", + "images/task_084_episode_10_324_cam_high.jpg", + "images/task_084_episode_10_339_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_10", + "recording_id": "task_084_episode_10", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Down\nC. Up\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Down" + } + ], + "images": [ + "images/task_084_episode_10_300_cam_high.jpg", + "images/task_084_episode_10_315_cam_high.jpg", + "images/task_084_episode_10_330_cam_high.jpg", + "images/task_084_episode_10_345_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_10", + "recording_id": "task_084_episode_10", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Only the left arm is actively moving" + } + ], + "images": [ + "images/task_084_episode_10_31_cam_high.jpg", + "images/task_084_episode_10_46_cam_high.jpg", + "images/task_084_episode_10_61_cam_high.jpg", + "images/task_084_episode_10_76_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_10", + "recording_id": "task_084_episode_10", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_084_episode_10_68_cam_high.jpg", + "images/task_084_episode_10_83_cam_high.jpg", + "images/task_084_episode_10_98_cam_high.jpg", + "images/task_084_episode_10_113_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_10", + "recording_id": "task_084_episode_10", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_084_episode_10_241_cam_high.jpg", + "images/task_084_episode_10_256_cam_high.jpg", + "images/task_084_episode_10_271_cam_high.jpg", + "images/task_084_episode_10_286_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_10", + "recording_id": "task_084_episode_10", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_084_episode_10_210_cam_high.jpg", + "images/task_084_episode_10_225_cam_high.jpg", + "images/task_084_episode_10_240_cam_high.jpg", + "images/task_084_episode_10_255_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_10", + "recording_id": "task_084_episode_10", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Both arms are actively moving" + } + ], + "images": [ + "images/task_084_episode_10_328_cam_high.jpg", + "images/task_084_episode_10_343_cam_high.jpg", + "images/task_084_episode_10_358_cam_high.jpg", + "images/task_084_episode_10_373_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_10", + "recording_id": "task_084_episode_10", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Only the right arm is actively moving" + } + ], + "images": [ + "images/task_084_episode_10_352_cam_high.jpg", + "images/task_084_episode_10_367_cam_high.jpg", + "images/task_084_episode_10_382_cam_high.jpg", + "images/task_084_episode_10_397_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_10", + "recording_id": "task_084_episode_10", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Both arms are actively moving" + } + ], + "images": [ + "images/task_084_episode_10_6_cam_high.jpg", + "images/task_084_episode_10_21_cam_high.jpg", + "images/task_084_episode_10_36_cam_high.jpg", + "images/task_084_episode_10_51_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_10", + "recording_id": "task_084_episode_10", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Only the right arm is actively moving" + } + ], + "images": [ + "images/task_084_episode_10_340_cam_high.jpg", + "images/task_084_episode_10_355_cam_high.jpg", + "images/task_084_episode_10_370_cam_high.jpg", + "images/task_084_episode_10_385_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_10", + "recording_id": "task_084_episode_10", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Both arms are actively moving" + } + ], + "images": [ + "images/task_084_episode_10_322_cam_high.jpg", + "images/task_084_episode_10_337_cam_high.jpg", + "images/task_084_episode_10_352_cam_high.jpg", + "images/task_084_episode_10_367_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_10", + "recording_id": "task_084_episode_10", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Only the right arm is actively moving" + } + ], + "images": [ + "images/task_084_episode_10_359_cam_high.jpg", + "images/task_084_episode_10_374_cam_high.jpg", + "images/task_084_episode_10_389_cam_high.jpg", + "images/task_084_episode_10_404_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_10", + "recording_id": "task_084_episode_10", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_084_episode_10_6_cam_high.jpg", + "images/task_084_episode_10_9_cam_high.jpg", + "images/task_084_episode_10_12_cam_high.jpg", + "images/task_084_episode_10_15_cam_high.jpg", + "images/task_084_episode_10_18_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_10", + "recording_id": "task_084_episode_10", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_084_episode_10_304_cam_high.jpg", + "images/task_084_episode_10_307_cam_high.jpg", + "images/task_084_episode_10_310_cam_high.jpg", + "images/task_084_episode_10_313_cam_high.jpg", + "images/task_084_episode_10_316_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_10", + "recording_id": "task_084_episode_10", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_084_episode_10_441_cam_high.jpg", + "images/task_084_episode_10_444_cam_high.jpg", + "images/task_084_episode_10_447_cam_high.jpg", + "images/task_084_episode_10_450_cam_high.jpg", + "images/task_084_episode_10_453_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_10", + "recording_id": "task_084_episode_10", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_084_episode_10_424_cam_high.jpg", + "images/task_084_episode_10_427_cam_high.jpg", + "images/task_084_episode_10_430_cam_high.jpg", + "images/task_084_episode_10_433_cam_high.jpg", + "images/task_084_episode_10_436_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_10", + "recording_id": "task_084_episode_10", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_084_episode_10_412_cam_high.jpg", + "images/task_084_episode_10_415_cam_high.jpg", + "images/task_084_episode_10_418_cam_high.jpg", + "images/task_084_episode_10_421_cam_high.jpg", + "images/task_084_episode_10_424_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_10", + "recording_id": "task_084_episode_10", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_084_episode_10_452_cam_high.jpg", + "images/task_084_episode_10_455_cam_high.jpg", + "images/task_084_episode_10_458_cam_high.jpg", + "images/task_084_episode_10_461_cam_high.jpg", + "images/task_084_episode_10_464_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_10", + "recording_id": "task_084_episode_10", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_084_episode_10_338_cam_high.jpg", + "images/task_084_episode_10_341_cam_high.jpg", + "images/task_084_episode_10_344_cam_high.jpg", + "images/task_084_episode_10_347_cam_high.jpg", + "images/task_084_episode_10_350_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_10", + "recording_id": "task_084_episode_10", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_084_episode_10_40_cam_high.jpg", + "images/task_084_episode_10_43_cam_high.jpg", + "images/task_084_episode_10_46_cam_high.jpg", + "images/task_084_episode_10_49_cam_high.jpg", + "images/task_084_episode_10_52_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_10", + "recording_id": "task_084_episode_10", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Frame Y happens before Frame X" + } + ], + "images": [ + "images/task_084_episode_10_359_cam_high.jpg", + "images/task_084_episode_10_90_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_10", + "recording_id": "task_084_episode_10", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Frame X happens before Frame Y" + } + ], + "images": [ + "images/task_084_episode_10_125_cam_high.jpg", + "images/task_084_episode_10_309_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_10", + "recording_id": "task_084_episode_10", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Frame X happens before Frame Y" + } + ], + "images": [ + "images/task_084_episode_10_175_cam_high.jpg", + "images/task_084_episode_10_409_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_10", + "recording_id": "task_084_episode_10", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_084_episode_10_104_cam_high.jpg", + "images/task_084_episode_10_157_cam_high.jpg", + "images/task_084_episode_10_131_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_10", + "recording_id": "task_084_episode_10", + "episode_id": null, + "camera": "cam_high", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_084_episode_10_157_cam_high.jpg", + "images/task_084_episode_10_183_cam_high.jpg", + "images/task_084_episode_10_131_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_10", + "recording_id": "task_084_episode_10", + "episode_id": null, + "camera": "cam_high", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_084_episode_10_236_cam_high.jpg", + "images/task_084_episode_10_210_cam_high.jpg", + "images/task_084_episode_10_183_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_10", + "recording_id": "task_084_episode_10", + "episode_id": null, + "camera": "cam_high", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_084_episode_10_448_cam_high.jpg", + "images/task_084_episode_10_421_cam_high.jpg", + "images/task_084_episode_10_474_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_10", + "recording_id": "task_084_episode_10", + "episode_id": null, + "camera": "cam_high", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_084_episode_10_421_cam_high.jpg", + "images/task_084_episode_10_448_cam_high.jpg", + "images/task_084_episode_10_395_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_10", + "recording_id": "task_084_episode_10", + "episode_id": null, + "camera": "cam_high", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_084_episode_10_342_cam_high.jpg", + "images/task_084_episode_10_316_cam_high.jpg", + "images/task_084_episode_10_368_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_10", + "recording_id": "task_084_episode_10", + "episode_id": null, + "camera": "cam_high", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_084_episode_10_25_cam_high.jpg", + "images/task_084_episode_10_51_cam_high.jpg", + "images/task_084_episode_10_78_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_10", + "recording_id": "task_084_episode_10", + "episode_id": null, + "camera": "cam_high", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_084_episode_10_263_cam_high.jpg", + "images/task_084_episode_10_236_cam_high.jpg", + "images/task_084_episode_10_289_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_10", + "recording_id": "task_084_episode_10", + "episode_id": null, + "camera": "cam_high", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_084_episode_10_104_cam_high.jpg", + "images/task_084_episode_10_78_cam_high.jpg", + "images/task_084_episode_10_131_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_10", + "recording_id": "task_084_episode_10", + "episode_id": null, + "camera": "cam_high", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_084_episode_10_368_cam_high.jpg", + "images/task_084_episode_10_395_cam_high.jpg", + "images/task_084_episode_10_421_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_10", + "recording_id": "task_084_episode_10", + "episode_id": null, + "camera": "cam_high", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_084_episode_10_316_cam_high.jpg", + "images/task_084_episode_10_342_cam_high.jpg", + "images/task_084_episode_10_289_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_10", + "recording_id": "task_084_episode_10", + "episode_id": null, + "camera": "cam_high", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_084_episode_10_395_cam_high.jpg", + "images/task_084_episode_10_342_cam_high.jpg", + "images/task_084_episode_10_368_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_10", + "recording_id": "task_084_episode_10", + "episode_id": null, + "camera": "cam_high", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_084_episode_10_263_cam_high.jpg", + "images/task_084_episode_10_236_cam_high.jpg", + "images/task_084_episode_10_210_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_10", + "recording_id": "task_084_episode_10", + "episode_id": null, + "camera": "cam_high", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_084_episode_10_51_cam_high.jpg", + "images/task_084_episode_10_78_cam_high.jpg", + "images/task_084_episode_10_104_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_10", + "recording_id": "task_084_episode_10", + "episode_id": null, + "camera": "cam_high", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Down\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Left" + } + ], + "images": [ + "images/task_084_episode_5_0_cam_high.jpg", + "images/task_084_episode_5_15_cam_high.jpg", + "images/task_084_episode_5_30_cam_high.jpg", + "images/task_084_episode_5_45_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_5", + "recording_id": "task_084_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Down\nC. Up\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Left" + } + ], + "images": [ + "images/task_084_episode_5_6_cam_high.jpg", + "images/task_084_episode_5_21_cam_high.jpg", + "images/task_084_episode_5_36_cam_high.jpg", + "images/task_084_episode_5_51_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_5", + "recording_id": "task_084_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Up\nC. Right\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Left" + } + ], + "images": [ + "images/task_084_episode_5_12_cam_high.jpg", + "images/task_084_episode_5_27_cam_high.jpg", + "images/task_084_episode_5_42_cam_high.jpg", + "images/task_084_episode_5_57_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_5", + "recording_id": "task_084_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Left\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Up" + } + ], + "images": [ + "images/task_084_episode_5_18_cam_high.jpg", + "images/task_084_episode_5_33_cam_high.jpg", + "images/task_084_episode_5_48_cam_high.jpg", + "images/task_084_episode_5_63_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_5", + "recording_id": "task_084_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Left\nC. Down\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Right" + } + ], + "images": [ + "images/task_084_episode_5_114_cam_high.jpg", + "images/task_084_episode_5_129_cam_high.jpg", + "images/task_084_episode_5_144_cam_high.jpg", + "images/task_084_episode_5_159_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_5", + "recording_id": "task_084_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Left\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Right" + } + ], + "images": [ + "images/task_084_episode_5_114_cam_high.jpg", + "images/task_084_episode_5_129_cam_high.jpg", + "images/task_084_episode_5_144_cam_high.jpg", + "images/task_084_episode_5_159_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_5", + "recording_id": "task_084_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Down\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Right" + } + ], + "images": [ + "images/task_084_episode_5_120_cam_high.jpg", + "images/task_084_episode_5_135_cam_high.jpg", + "images/task_084_episode_5_150_cam_high.jpg", + "images/task_084_episode_5_165_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_5", + "recording_id": "task_084_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Up\nC. Down\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Right" + } + ], + "images": [ + "images/task_084_episode_5_120_cam_high.jpg", + "images/task_084_episode_5_135_cam_high.jpg", + "images/task_084_episode_5_150_cam_high.jpg", + "images/task_084_episode_5_165_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_5", + "recording_id": "task_084_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Left\nC. Down\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Right" + } + ], + "images": [ + "images/task_084_episode_5_126_cam_high.jpg", + "images/task_084_episode_5_141_cam_high.jpg", + "images/task_084_episode_5_156_cam_high.jpg", + "images/task_084_episode_5_171_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_5", + "recording_id": "task_084_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Down\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Right" + } + ], + "images": [ + "images/task_084_episode_5_126_cam_high.jpg", + "images/task_084_episode_5_141_cam_high.jpg", + "images/task_084_episode_5_156_cam_high.jpg", + "images/task_084_episode_5_171_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_5", + "recording_id": "task_084_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Down\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Up" + } + ], + "images": [ + "images/task_084_episode_5_156_cam_high.jpg", + "images/task_084_episode_5_171_cam_high.jpg", + "images/task_084_episode_5_186_cam_high.jpg", + "images/task_084_episode_5_201_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_5", + "recording_id": "task_084_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Down\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Up" + } + ], + "images": [ + "images/task_084_episode_5_156_cam_high.jpg", + "images/task_084_episode_5_171_cam_high.jpg", + "images/task_084_episode_5_186_cam_high.jpg", + "images/task_084_episode_5_201_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_5", + "recording_id": "task_084_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Up" + } + ], + "images": [ + "images/task_084_episode_5_162_cam_high.jpg", + "images/task_084_episode_5_177_cam_high.jpg", + "images/task_084_episode_5_192_cam_high.jpg", + "images/task_084_episode_5_207_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_5", + "recording_id": "task_084_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Down\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Up" + } + ], + "images": [ + "images/task_084_episode_5_162_cam_high.jpg", + "images/task_084_episode_5_177_cam_high.jpg", + "images/task_084_episode_5_192_cam_high.jpg", + "images/task_084_episode_5_207_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_5", + "recording_id": "task_084_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Left\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Up" + } + ], + "images": [ + "images/task_084_episode_5_168_cam_high.jpg", + "images/task_084_episode_5_183_cam_high.jpg", + "images/task_084_episode_5_198_cam_high.jpg", + "images/task_084_episode_5_213_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_5", + "recording_id": "task_084_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Right\nC. Left\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Down" + } + ], + "images": [ + "images/task_084_episode_5_258_cam_high.jpg", + "images/task_084_episode_5_273_cam_high.jpg", + "images/task_084_episode_5_288_cam_high.jpg", + "images/task_084_episode_5_303_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_5", + "recording_id": "task_084_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Right\nC. Left\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Down" + } + ], + "images": [ + "images/task_084_episode_5_258_cam_high.jpg", + "images/task_084_episode_5_273_cam_high.jpg", + "images/task_084_episode_5_288_cam_high.jpg", + "images/task_084_episode_5_303_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_5", + "recording_id": "task_084_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Down" + } + ], + "images": [ + "images/task_084_episode_5_264_cam_high.jpg", + "images/task_084_episode_5_279_cam_high.jpg", + "images/task_084_episode_5_294_cam_high.jpg", + "images/task_084_episode_5_309_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_5", + "recording_id": "task_084_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Left\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Down" + } + ], + "images": [ + "images/task_084_episode_5_264_cam_high.jpg", + "images/task_084_episode_5_279_cam_high.jpg", + "images/task_084_episode_5_294_cam_high.jpg", + "images/task_084_episode_5_309_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_5", + "recording_id": "task_084_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Down\nC. Right\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Down" + } + ], + "images": [ + "images/task_084_episode_5_270_cam_high.jpg", + "images/task_084_episode_5_285_cam_high.jpg", + "images/task_084_episode_5_300_cam_high.jpg", + "images/task_084_episode_5_315_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_5", + "recording_id": "task_084_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Left\nC. Down\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Down" + } + ], + "images": [ + "images/task_084_episode_5_270_cam_high.jpg", + "images/task_084_episode_5_285_cam_high.jpg", + "images/task_084_episode_5_300_cam_high.jpg", + "images/task_084_episode_5_315_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_5", + "recording_id": "task_084_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_084_episode_5_235_cam_high.jpg", + "images/task_084_episode_5_250_cam_high.jpg", + "images/task_084_episode_5_265_cam_high.jpg", + "images/task_084_episode_5_280_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_5", + "recording_id": "task_084_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_084_episode_5_216_cam_high.jpg", + "images/task_084_episode_5_231_cam_high.jpg", + "images/task_084_episode_5_246_cam_high.jpg", + "images/task_084_episode_5_261_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_5", + "recording_id": "task_084_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Only the left arm is actively moving" + } + ], + "images": [ + "images/task_084_episode_5_43_cam_high.jpg", + "images/task_084_episode_5_58_cam_high.jpg", + "images/task_084_episode_5_73_cam_high.jpg", + "images/task_084_episode_5_88_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_5", + "recording_id": "task_084_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Both arms are actively moving" + } + ], + "images": [ + "images/task_084_episode_5_328_cam_high.jpg", + "images/task_084_episode_5_343_cam_high.jpg", + "images/task_084_episode_5_358_cam_high.jpg", + "images/task_084_episode_5_373_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_5", + "recording_id": "task_084_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Both arms are actively moving" + } + ], + "images": [ + "images/task_084_episode_5_6_cam_high.jpg", + "images/task_084_episode_5_21_cam_high.jpg", + "images/task_084_episode_5_36_cam_high.jpg", + "images/task_084_episode_5_51_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_5", + "recording_id": "task_084_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_084_episode_5_80_cam_high.jpg", + "images/task_084_episode_5_95_cam_high.jpg", + "images/task_084_episode_5_110_cam_high.jpg", + "images/task_084_episode_5_125_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_5", + "recording_id": "task_084_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Both arms are actively moving" + } + ], + "images": [ + "images/task_084_episode_5_25_cam_high.jpg", + "images/task_084_episode_5_40_cam_high.jpg", + "images/task_084_episode_5_55_cam_high.jpg", + "images/task_084_episode_5_70_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_5", + "recording_id": "task_084_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Only the right arm is actively moving" + } + ], + "images": [ + "images/task_084_episode_5_352_cam_high.jpg", + "images/task_084_episode_5_367_cam_high.jpg", + "images/task_084_episode_5_382_cam_high.jpg", + "images/task_084_episode_5_397_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_5", + "recording_id": "task_084_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Only the right arm is actively moving" + } + ], + "images": [ + "images/task_084_episode_5_365_cam_high.jpg", + "images/task_084_episode_5_380_cam_high.jpg", + "images/task_084_episode_5_395_cam_high.jpg", + "images/task_084_episode_5_410_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_5", + "recording_id": "task_084_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_084_episode_5_464_cam_high.jpg", + "images/task_084_episode_5_467_cam_high.jpg", + "images/task_084_episode_5_470_cam_high.jpg", + "images/task_084_episode_5_473_cam_high.jpg", + "images/task_084_episode_5_476_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_5", + "recording_id": "task_084_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_084_episode_5_12_cam_high.jpg", + "images/task_084_episode_5_15_cam_high.jpg", + "images/task_084_episode_5_18_cam_high.jpg", + "images/task_084_episode_5_21_cam_high.jpg", + "images/task_084_episode_5_24_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_5", + "recording_id": "task_084_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_084_episode_5_138_cam_high.jpg", + "images/task_084_episode_5_141_cam_high.jpg", + "images/task_084_episode_5_144_cam_high.jpg", + "images/task_084_episode_5_147_cam_high.jpg", + "images/task_084_episode_5_150_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_5", + "recording_id": "task_084_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_084_episode_5_315_cam_high.jpg", + "images/task_084_episode_5_318_cam_high.jpg", + "images/task_084_episode_5_321_cam_high.jpg", + "images/task_084_episode_5_324_cam_high.jpg", + "images/task_084_episode_5_327_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_5", + "recording_id": "task_084_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_084_episode_5_435_cam_high.jpg", + "images/task_084_episode_5_438_cam_high.jpg", + "images/task_084_episode_5_441_cam_high.jpg", + "images/task_084_episode_5_444_cam_high.jpg", + "images/task_084_episode_5_447_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_5", + "recording_id": "task_084_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_084_episode_5_92_cam_high.jpg", + "images/task_084_episode_5_95_cam_high.jpg", + "images/task_084_episode_5_98_cam_high.jpg", + "images/task_084_episode_5_101_cam_high.jpg", + "images/task_084_episode_5_104_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_5", + "recording_id": "task_084_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_084_episode_5_355_cam_high.jpg", + "images/task_084_episode_5_358_cam_high.jpg", + "images/task_084_episode_5_361_cam_high.jpg", + "images/task_084_episode_5_364_cam_high.jpg", + "images/task_084_episode_5_367_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_5", + "recording_id": "task_084_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_084_episode_5_58_cam_high.jpg", + "images/task_084_episode_5_61_cam_high.jpg", + "images/task_084_episode_5_64_cam_high.jpg", + "images/task_084_episode_5_67_cam_high.jpg", + "images/task_084_episode_5_70_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_5", + "recording_id": "task_084_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Frame Y happens before Frame X" + } + ], + "images": [ + "images/task_084_episode_5_359_cam_high.jpg", + "images/task_084_episode_5_90_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_5", + "recording_id": "task_084_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Frame X happens before Frame Y" + } + ], + "images": [ + "images/task_084_episode_5_125_cam_high.jpg", + "images/task_084_episode_5_309_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_5", + "recording_id": "task_084_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Frame X happens before Frame Y" + } + ], + "images": [ + "images/task_084_episode_5_175_cam_high.jpg", + "images/task_084_episode_5_409_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_5", + "recording_id": "task_084_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_084_episode_5_395_cam_high.jpg", + "images/task_084_episode_5_421_cam_high.jpg", + "images/task_084_episode_5_448_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_5", + "recording_id": "task_084_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_084_episode_5_131_cam_high.jpg", + "images/task_084_episode_5_104_cam_high.jpg", + "images/task_084_episode_5_157_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_5", + "recording_id": "task_084_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_084_episode_5_104_cam_high.jpg", + "images/task_084_episode_5_78_cam_high.jpg", + "images/task_084_episode_5_51_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_5", + "recording_id": "task_084_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_084_episode_5_342_cam_high.jpg", + "images/task_084_episode_5_395_cam_high.jpg", + "images/task_084_episode_5_368_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_5", + "recording_id": "task_084_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_084_episode_5_157_cam_high.jpg", + "images/task_084_episode_5_131_cam_high.jpg", + "images/task_084_episode_5_183_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_5", + "recording_id": "task_084_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_084_episode_5_316_cam_high.jpg", + "images/task_084_episode_5_342_cam_high.jpg", + "images/task_084_episode_5_368_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_5", + "recording_id": "task_084_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_084_episode_5_78_cam_high.jpg", + "images/task_084_episode_5_51_cam_high.jpg", + "images/task_084_episode_5_25_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_5", + "recording_id": "task_084_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_084_episode_5_289_cam_high.jpg", + "images/task_084_episode_5_316_cam_high.jpg", + "images/task_084_episode_5_342_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_5", + "recording_id": "task_084_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_084_episode_5_183_cam_high.jpg", + "images/task_084_episode_5_210_cam_high.jpg", + "images/task_084_episode_5_157_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_5", + "recording_id": "task_084_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_084_episode_5_263_cam_high.jpg", + "images/task_084_episode_5_316_cam_high.jpg", + "images/task_084_episode_5_289_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_5", + "recording_id": "task_084_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_084_episode_5_236_cam_high.jpg", + "images/task_084_episode_5_210_cam_high.jpg", + "images/task_084_episode_5_263_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_5", + "recording_id": "task_084_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_084_episode_5_395_cam_high.jpg", + "images/task_084_episode_5_368_cam_high.jpg", + "images/task_084_episode_5_421_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_5", + "recording_id": "task_084_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_084_episode_5_236_cam_high.jpg", + "images/task_084_episode_5_289_cam_high.jpg", + "images/task_084_episode_5_263_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_5", + "recording_id": "task_084_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_084_episode_5_78_cam_high.jpg", + "images/task_084_episode_5_131_cam_high.jpg", + "images/task_084_episode_5_104_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_5", + "recording_id": "task_084_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_084_episode_5_448_cam_high.jpg", + "images/task_084_episode_5_421_cam_high.jpg", + "images/task_084_episode_5_474_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_5", + "recording_id": "task_084_episode_5", + "episode_id": null, + "camera": "cam_high", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Left\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Up" + } + ], + "images": [ + "images/task_084_episode_99_0_cam_high.jpg", + "images/task_084_episode_99_15_cam_high.jpg", + "images/task_084_episode_99_30_cam_high.jpg", + "images/task_084_episode_99_45_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_99", + "recording_id": "task_084_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Left\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Up" + } + ], + "images": [ + "images/task_084_episode_99_0_cam_high.jpg", + "images/task_084_episode_99_15_cam_high.jpg", + "images/task_084_episode_99_30_cam_high.jpg", + "images/task_084_episode_99_45_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_99", + "recording_id": "task_084_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Up\nC. Down\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Up" + } + ], + "images": [ + "images/task_084_episode_99_6_cam_high.jpg", + "images/task_084_episode_99_21_cam_high.jpg", + "images/task_084_episode_99_36_cam_high.jpg", + "images/task_084_episode_99_51_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_99", + "recording_id": "task_084_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Up\nC. Down\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Up" + } + ], + "images": [ + "images/task_084_episode_99_6_cam_high.jpg", + "images/task_084_episode_99_21_cam_high.jpg", + "images/task_084_episode_99_36_cam_high.jpg", + "images/task_084_episode_99_51_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_99", + "recording_id": "task_084_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Left\nC. Right\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Left" + } + ], + "images": [ + "images/task_084_episode_99_24_cam_high.jpg", + "images/task_084_episode_99_39_cam_high.jpg", + "images/task_084_episode_99_54_cam_high.jpg", + "images/task_084_episode_99_69_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_99", + "recording_id": "task_084_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Left\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Left" + } + ], + "images": [ + "images/task_084_episode_99_30_cam_high.jpg", + "images/task_084_episode_99_45_cam_high.jpg", + "images/task_084_episode_99_60_cam_high.jpg", + "images/task_084_episode_99_75_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_99", + "recording_id": "task_084_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Left\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Left" + } + ], + "images": [ + "images/task_084_episode_99_36_cam_high.jpg", + "images/task_084_episode_99_51_cam_high.jpg", + "images/task_084_episode_99_66_cam_high.jpg", + "images/task_084_episode_99_81_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_99", + "recording_id": "task_084_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Down\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Right" + } + ], + "images": [ + "images/task_084_episode_99_78_cam_high.jpg", + "images/task_084_episode_99_93_cam_high.jpg", + "images/task_084_episode_99_108_cam_high.jpg", + "images/task_084_episode_99_123_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_99", + "recording_id": "task_084_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Left\nC. Right\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Right" + } + ], + "images": [ + "images/task_084_episode_99_84_cam_high.jpg", + "images/task_084_episode_99_99_cam_high.jpg", + "images/task_084_episode_99_114_cam_high.jpg", + "images/task_084_episode_99_129_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_99", + "recording_id": "task_084_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Left\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Right" + } + ], + "images": [ + "images/task_084_episode_99_84_cam_high.jpg", + "images/task_084_episode_99_99_cam_high.jpg", + "images/task_084_episode_99_114_cam_high.jpg", + "images/task_084_episode_99_129_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_99", + "recording_id": "task_084_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Left\nC. Down\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Right" + } + ], + "images": [ + "images/task_084_episode_99_90_cam_high.jpg", + "images/task_084_episode_99_105_cam_high.jpg", + "images/task_084_episode_99_120_cam_high.jpg", + "images/task_084_episode_99_135_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_99", + "recording_id": "task_084_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Left\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Right" + } + ], + "images": [ + "images/task_084_episode_99_90_cam_high.jpg", + "images/task_084_episode_99_105_cam_high.jpg", + "images/task_084_episode_99_120_cam_high.jpg", + "images/task_084_episode_99_135_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_99", + "recording_id": "task_084_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Right" + } + ], + "images": [ + "images/task_084_episode_99_96_cam_high.jpg", + "images/task_084_episode_99_111_cam_high.jpg", + "images/task_084_episode_99_126_cam_high.jpg", + "images/task_084_episode_99_141_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_99", + "recording_id": "task_084_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Left\nC. Down\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Up" + } + ], + "images": [ + "images/task_084_episode_99_126_cam_high.jpg", + "images/task_084_episode_99_141_cam_high.jpg", + "images/task_084_episode_99_156_cam_high.jpg", + "images/task_084_episode_99_171_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_99", + "recording_id": "task_084_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Left\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Up" + } + ], + "images": [ + "images/task_084_episode_99_132_cam_high.jpg", + "images/task_084_episode_99_147_cam_high.jpg", + "images/task_084_episode_99_162_cam_high.jpg", + "images/task_084_episode_99_177_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_99", + "recording_id": "task_084_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Right\nC. Left\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Down" + } + ], + "images": [ + "images/task_084_episode_99_276_cam_high.jpg", + "images/task_084_episode_99_291_cam_high.jpg", + "images/task_084_episode_99_306_cam_high.jpg", + "images/task_084_episode_99_321_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_99", + "recording_id": "task_084_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Down\nC. Up\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Down" + } + ], + "images": [ + "images/task_084_episode_99_282_cam_high.jpg", + "images/task_084_episode_99_297_cam_high.jpg", + "images/task_084_episode_99_312_cam_high.jpg", + "images/task_084_episode_99_327_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_99", + "recording_id": "task_084_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Left\nC. Right\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Down" + } + ], + "images": [ + "images/task_084_episode_99_282_cam_high.jpg", + "images/task_084_episode_99_297_cam_high.jpg", + "images/task_084_episode_99_312_cam_high.jpg", + "images/task_084_episode_99_327_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_99", + "recording_id": "task_084_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Left\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Down" + } + ], + "images": [ + "images/task_084_episode_99_288_cam_high.jpg", + "images/task_084_episode_99_303_cam_high.jpg", + "images/task_084_episode_99_318_cam_high.jpg", + "images/task_084_episode_99_333_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_99", + "recording_id": "task_084_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Left\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Down" + } + ], + "images": [ + "images/task_084_episode_99_288_cam_high.jpg", + "images/task_084_episode_99_303_cam_high.jpg", + "images/task_084_episode_99_318_cam_high.jpg", + "images/task_084_episode_99_333_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_99", + "recording_id": "task_084_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Down\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Down" + } + ], + "images": [ + "images/task_084_episode_99_294_cam_high.jpg", + "images/task_084_episode_99_309_cam_high.jpg", + "images/task_084_episode_99_324_cam_high.jpg", + "images/task_084_episode_99_339_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_99", + "recording_id": "task_084_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Both arms are actively moving" + } + ], + "images": [ + "images/task_084_episode_99_352_cam_high.jpg", + "images/task_084_episode_99_367_cam_high.jpg", + "images/task_084_episode_99_382_cam_high.jpg", + "images/task_084_episode_99_397_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_99", + "recording_id": "task_084_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_084_episode_99_408_cam_high.jpg", + "images/task_084_episode_99_423_cam_high.jpg", + "images/task_084_episode_99_438_cam_high.jpg", + "images/task_084_episode_99_453_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_99", + "recording_id": "task_084_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Both arms are actively moving" + } + ], + "images": [ + "images/task_084_episode_99_322_cam_high.jpg", + "images/task_084_episode_99_337_cam_high.jpg", + "images/task_084_episode_99_352_cam_high.jpg", + "images/task_084_episode_99_367_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_99", + "recording_id": "task_084_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_084_episode_99_396_cam_high.jpg", + "images/task_084_episode_99_411_cam_high.jpg", + "images/task_084_episode_99_426_cam_high.jpg", + "images/task_084_episode_99_441_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_99", + "recording_id": "task_084_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_084_episode_99_117_cam_high.jpg", + "images/task_084_episode_99_132_cam_high.jpg", + "images/task_084_episode_99_147_cam_high.jpg", + "images/task_084_episode_99_162_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_99", + "recording_id": "task_084_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Both arms are actively moving" + } + ], + "images": [ + "images/task_084_episode_99_334_cam_high.jpg", + "images/task_084_episode_99_349_cam_high.jpg", + "images/task_084_episode_99_364_cam_high.jpg", + "images/task_084_episode_99_379_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_99", + "recording_id": "task_084_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_084_episode_99_372_cam_high.jpg", + "images/task_084_episode_99_375_cam_high.jpg", + "images/task_084_episode_99_378_cam_high.jpg", + "images/task_084_episode_99_381_cam_high.jpg", + "images/task_084_episode_99_384_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_99", + "recording_id": "task_084_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_084_episode_99_355_cam_high.jpg", + "images/task_084_episode_99_358_cam_high.jpg", + "images/task_084_episode_99_361_cam_high.jpg", + "images/task_084_episode_99_364_cam_high.jpg", + "images/task_084_episode_99_367_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_99", + "recording_id": "task_084_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_084_episode_99_447_cam_high.jpg", + "images/task_084_episode_99_450_cam_high.jpg", + "images/task_084_episode_99_453_cam_high.jpg", + "images/task_084_episode_99_456_cam_high.jpg", + "images/task_084_episode_99_459_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_99", + "recording_id": "task_084_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_084_episode_99_98_cam_high.jpg", + "images/task_084_episode_99_101_cam_high.jpg", + "images/task_084_episode_99_104_cam_high.jpg", + "images/task_084_episode_99_107_cam_high.jpg", + "images/task_084_episode_99_110_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_99", + "recording_id": "task_084_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_084_episode_99_355_cam_high.jpg", + "images/task_084_episode_99_358_cam_high.jpg", + "images/task_084_episode_99_361_cam_high.jpg", + "images/task_084_episode_99_364_cam_high.jpg", + "images/task_084_episode_99_367_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_99", + "recording_id": "task_084_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_084_episode_99_315_cam_high.jpg", + "images/task_084_episode_99_318_cam_high.jpg", + "images/task_084_episode_99_321_cam_high.jpg", + "images/task_084_episode_99_324_cam_high.jpg", + "images/task_084_episode_99_327_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_99", + "recording_id": "task_084_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_084_episode_99_464_cam_high.jpg", + "images/task_084_episode_99_467_cam_high.jpg", + "images/task_084_episode_99_470_cam_high.jpg", + "images/task_084_episode_99_473_cam_high.jpg", + "images/task_084_episode_99_476_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_99", + "recording_id": "task_084_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_084_episode_99_447_cam_high.jpg", + "images/task_084_episode_99_450_cam_high.jpg", + "images/task_084_episode_99_453_cam_high.jpg", + "images/task_084_episode_99_456_cam_high.jpg", + "images/task_084_episode_99_459_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_99", + "recording_id": "task_084_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Frame Y happens before Frame X" + } + ], + "images": [ + "images/task_084_episode_99_359_cam_high.jpg", + "images/task_084_episode_99_90_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_99", + "recording_id": "task_084_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Frame Y happens before Frame X" + } + ], + "images": [ + "images/task_084_episode_99_309_cam_high.jpg", + "images/task_084_episode_99_125_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_99", + "recording_id": "task_084_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Frame Y happens before Frame X" + } + ], + "images": [ + "images/task_084_episode_99_409_cam_high.jpg", + "images/task_084_episode_99_175_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_99", + "recording_id": "task_084_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_084_episode_99_289_cam_high.jpg", + "images/task_084_episode_99_236_cam_high.jpg", + "images/task_084_episode_99_263_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_99", + "recording_id": "task_084_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_084_episode_99_474_cam_high.jpg", + "images/task_084_episode_99_421_cam_high.jpg", + "images/task_084_episode_99_448_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_99", + "recording_id": "task_084_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_084_episode_99_183_cam_high.jpg", + "images/task_084_episode_99_210_cam_high.jpg", + "images/task_084_episode_99_157_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_99", + "recording_id": "task_084_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_084_episode_99_236_cam_high.jpg", + "images/task_084_episode_99_263_cam_high.jpg", + "images/task_084_episode_99_210_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_99", + "recording_id": "task_084_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_084_episode_99_25_cam_high.jpg", + "images/task_084_episode_99_51_cam_high.jpg", + "images/task_084_episode_99_78_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_99", + "recording_id": "task_084_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_084_episode_99_131_cam_high.jpg", + "images/task_084_episode_99_104_cam_high.jpg", + "images/task_084_episode_99_157_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_084", + "split": "eval", + "split_group_id": "task_084_episode_99", + "recording_id": "task_084_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Up\nC. Down\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Up" + } + ], + "images": [ + "images/task_089_episode_31_0_cam_high.jpg", + "images/task_089_episode_31_15_cam_high.jpg", + "images/task_089_episode_31_30_cam_high.jpg", + "images/task_089_episode_31_45_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_31", + "recording_id": "task_089_episode_31", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Left\nC. Up\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Up" + } + ], + "images": [ + "images/task_089_episode_31_6_cam_high.jpg", + "images/task_089_episode_31_21_cam_high.jpg", + "images/task_089_episode_31_36_cam_high.jpg", + "images/task_089_episode_31_51_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_31", + "recording_id": "task_089_episode_31", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Left\nC. Up\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Left" + } + ], + "images": [ + "images/task_089_episode_31_12_cam_high.jpg", + "images/task_089_episode_31_27_cam_high.jpg", + "images/task_089_episode_31_42_cam_high.jpg", + "images/task_089_episode_31_57_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_31", + "recording_id": "task_089_episode_31", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Left\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Left" + } + ], + "images": [ + "images/task_089_episode_31_18_cam_high.jpg", + "images/task_089_episode_31_33_cam_high.jpg", + "images/task_089_episode_31_48_cam_high.jpg", + "images/task_089_episode_31_63_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_31", + "recording_id": "task_089_episode_31", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Right\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Left" + } + ], + "images": [ + "images/task_089_episode_31_24_cam_high.jpg", + "images/task_089_episode_31_39_cam_high.jpg", + "images/task_089_episode_31_54_cam_high.jpg", + "images/task_089_episode_31_69_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_31", + "recording_id": "task_089_episode_31", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Right\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Right" + } + ], + "images": [ + "images/task_089_episode_31_216_cam_high.jpg", + "images/task_089_episode_31_231_cam_high.jpg", + "images/task_089_episode_31_246_cam_high.jpg", + "images/task_089_episode_31_261_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_31", + "recording_id": "task_089_episode_31", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Up\nC. Right\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Right" + } + ], + "images": [ + "images/task_089_episode_31_222_cam_high.jpg", + "images/task_089_episode_31_237_cam_high.jpg", + "images/task_089_episode_31_252_cam_high.jpg", + "images/task_089_episode_31_267_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_31", + "recording_id": "task_089_episode_31", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Left\nC. Right\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Right" + } + ], + "images": [ + "images/task_089_episode_31_228_cam_high.jpg", + "images/task_089_episode_31_243_cam_high.jpg", + "images/task_089_episode_31_258_cam_high.jpg", + "images/task_089_episode_31_273_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_31", + "recording_id": "task_089_episode_31", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Down" + } + ], + "images": [ + "images/task_089_episode_31_258_cam_high.jpg", + "images/task_089_episode_31_273_cam_high.jpg", + "images/task_089_episode_31_288_cam_high.jpg", + "images/task_089_episode_31_303_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_31", + "recording_id": "task_089_episode_31", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Right\nC. Down\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Down" + } + ], + "images": [ + "images/task_089_episode_31_264_cam_high.jpg", + "images/task_089_episode_31_279_cam_high.jpg", + "images/task_089_episode_31_294_cam_high.jpg", + "images/task_089_episode_31_309_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_31", + "recording_id": "task_089_episode_31", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Right\nC. Down\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Down" + } + ], + "images": [ + "images/task_089_episode_31_270_cam_high.jpg", + "images/task_089_episode_31_285_cam_high.jpg", + "images/task_089_episode_31_300_cam_high.jpg", + "images/task_089_episode_31_315_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_31", + "recording_id": "task_089_episode_31", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Down\nC. Up\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Up" + } + ], + "images": [ + "images/task_089_episode_31_306_cam_high.jpg", + "images/task_089_episode_31_321_cam_high.jpg", + "images/task_089_episode_31_336_cam_high.jpg", + "images/task_089_episode_31_351_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_31", + "recording_id": "task_089_episode_31", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Left\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Up" + } + ], + "images": [ + "images/task_089_episode_31_312_cam_high.jpg", + "images/task_089_episode_31_327_cam_high.jpg", + "images/task_089_episode_31_342_cam_high.jpg", + "images/task_089_episode_31_357_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_31", + "recording_id": "task_089_episode_31", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Left\nC. Up\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Right" + } + ], + "images": [ + "images/task_089_episode_31_360_cam_high.jpg", + "images/task_089_episode_31_375_cam_high.jpg", + "images/task_089_episode_31_390_cam_high.jpg", + "images/task_089_episode_31_405_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_31", + "recording_id": "task_089_episode_31", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Left\nC. Down\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Right" + } + ], + "images": [ + "images/task_089_episode_31_366_cam_high.jpg", + "images/task_089_episode_31_381_cam_high.jpg", + "images/task_089_episode_31_396_cam_high.jpg", + "images/task_089_episode_31_411_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_31", + "recording_id": "task_089_episode_31", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Left\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Right" + } + ], + "images": [ + "images/task_089_episode_31_372_cam_high.jpg", + "images/task_089_episode_31_387_cam_high.jpg", + "images/task_089_episode_31_402_cam_high.jpg", + "images/task_089_episode_31_417_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_31", + "recording_id": "task_089_episode_31", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Up" + } + ], + "images": [ + "images/task_089_episode_31_402_cam_high.jpg", + "images/task_089_episode_31_417_cam_high.jpg", + "images/task_089_episode_31_432_cam_high.jpg", + "images/task_089_episode_31_447_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_31", + "recording_id": "task_089_episode_31", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Down\nC. Left\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Up" + } + ], + "images": [ + "images/task_089_episode_31_408_cam_high.jpg", + "images/task_089_episode_31_423_cam_high.jpg", + "images/task_089_episode_31_438_cam_high.jpg", + "images/task_089_episode_31_453_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_31", + "recording_id": "task_089_episode_31", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Down\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Down" + } + ], + "images": [ + "images/task_089_episode_31_456_cam_high.jpg", + "images/task_089_episode_31_471_cam_high.jpg", + "images/task_089_episode_31_486_cam_high.jpg", + "images/task_089_episode_31_501_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_31", + "recording_id": "task_089_episode_31", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Down" + } + ], + "images": [ + "images/task_089_episode_31_594_cam_high.jpg", + "images/task_089_episode_31_609_cam_high.jpg", + "images/task_089_episode_31_624_cam_high.jpg", + "images/task_089_episode_31_639_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_31", + "recording_id": "task_089_episode_31", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Up\nC. Right\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Down" + } + ], + "images": [ + "images/task_089_episode_31_600_cam_high.jpg", + "images/task_089_episode_31_615_cam_high.jpg", + "images/task_089_episode_31_630_cam_high.jpg", + "images/task_089_episode_31_645_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_31", + "recording_id": "task_089_episode_31", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Only the left arm is actively moving" + } + ], + "images": [ + "images/task_089_episode_31_510_cam_high.jpg", + "images/task_089_episode_31_525_cam_high.jpg", + "images/task_089_episode_31_540_cam_high.jpg", + "images/task_089_episode_31_555_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_31", + "recording_id": "task_089_episode_31", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_089_episode_31_187_cam_high.jpg", + "images/task_089_episode_31_202_cam_high.jpg", + "images/task_089_episode_31_217_cam_high.jpg", + "images/task_089_episode_31_232_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_31", + "recording_id": "task_089_episode_31", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_089_episode_31_468_cam_high.jpg", + "images/task_089_episode_31_483_cam_high.jpg", + "images/task_089_episode_31_498_cam_high.jpg", + "images/task_089_episode_31_513_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_31", + "recording_id": "task_089_episode_31", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_089_episode_31_552_cam_high.jpg", + "images/task_089_episode_31_567_cam_high.jpg", + "images/task_089_episode_31_582_cam_high.jpg", + "images/task_089_episode_31_597_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_31", + "recording_id": "task_089_episode_31", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Only the right arm is actively moving" + } + ], + "images": [ + "images/task_089_episode_31_312_cam_high.jpg", + "images/task_089_episode_31_327_cam_high.jpg", + "images/task_089_episode_31_342_cam_high.jpg", + "images/task_089_episode_31_357_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_31", + "recording_id": "task_089_episode_31", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Only the right arm is actively moving" + } + ], + "images": [ + "images/task_089_episode_31_302_cam_high.jpg", + "images/task_089_episode_31_317_cam_high.jpg", + "images/task_089_episode_31_332_cam_high.jpg", + "images/task_089_episode_31_347_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_31", + "recording_id": "task_089_episode_31", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Only the right arm is actively moving" + } + ], + "images": [ + "images/task_089_episode_31_94_cam_high.jpg", + "images/task_089_episode_31_109_cam_high.jpg", + "images/task_089_episode_31_124_cam_high.jpg", + "images/task_089_episode_31_139_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_31", + "recording_id": "task_089_episode_31", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Both arms are actively moving" + } + ], + "images": [ + "images/task_089_episode_31_42_cam_high.jpg", + "images/task_089_episode_31_57_cam_high.jpg", + "images/task_089_episode_31_72_cam_high.jpg", + "images/task_089_episode_31_87_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_31", + "recording_id": "task_089_episode_31", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Both arms are actively moving" + } + ], + "images": [ + "images/task_089_episode_31_0_cam_high.jpg", + "images/task_089_episode_31_15_cam_high.jpg", + "images/task_089_episode_31_30_cam_high.jpg", + "images/task_089_episode_31_45_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_31", + "recording_id": "task_089_episode_31", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Only the left arm is actively moving" + } + ], + "images": [ + "images/task_089_episode_31_614_cam_high.jpg", + "images/task_089_episode_31_629_cam_high.jpg", + "images/task_089_episode_31_644_cam_high.jpg", + "images/task_089_episode_31_659_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_31", + "recording_id": "task_089_episode_31", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Only the left arm is actively moving" + } + ], + "images": [ + "images/task_089_episode_31_541_cam_high.jpg", + "images/task_089_episode_31_556_cam_high.jpg", + "images/task_089_episode_31_571_cam_high.jpg", + "images/task_089_episode_31_586_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_31", + "recording_id": "task_089_episode_31", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Both arms are actively moving" + } + ], + "images": [ + "images/task_089_episode_31_73_cam_high.jpg", + "images/task_089_episode_31_88_cam_high.jpg", + "images/task_089_episode_31_103_cam_high.jpg", + "images/task_089_episode_31_118_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_31", + "recording_id": "task_089_episode_31", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_089_episode_31_408_cam_high.jpg", + "images/task_089_episode_31_411_cam_high.jpg", + "images/task_089_episode_31_414_cam_high.jpg", + "images/task_089_episode_31_417_cam_high.jpg", + "images/task_089_episode_31_420_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_31", + "recording_id": "task_089_episode_31", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_089_episode_31_314_cam_high.jpg", + "images/task_089_episode_31_317_cam_high.jpg", + "images/task_089_episode_31_320_cam_high.jpg", + "images/task_089_episode_31_323_cam_high.jpg", + "images/task_089_episode_31_326_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_31", + "recording_id": "task_089_episode_31", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_089_episode_31_370_cam_high.jpg", + "images/task_089_episode_31_373_cam_high.jpg", + "images/task_089_episode_31_376_cam_high.jpg", + "images/task_089_episode_31_379_cam_high.jpg", + "images/task_089_episode_31_382_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_31", + "recording_id": "task_089_episode_31", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_089_episode_31_417_cam_high.jpg", + "images/task_089_episode_31_420_cam_high.jpg", + "images/task_089_episode_31_423_cam_high.jpg", + "images/task_089_episode_31_426_cam_high.jpg", + "images/task_089_episode_31_429_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_31", + "recording_id": "task_089_episode_31", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_089_episode_31_370_cam_high.jpg", + "images/task_089_episode_31_373_cam_high.jpg", + "images/task_089_episode_31_376_cam_high.jpg", + "images/task_089_episode_31_379_cam_high.jpg", + "images/task_089_episode_31_382_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_31", + "recording_id": "task_089_episode_31", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_089_episode_31_165_cam_high.jpg", + "images/task_089_episode_31_168_cam_high.jpg", + "images/task_089_episode_31_171_cam_high.jpg", + "images/task_089_episode_31_174_cam_high.jpg", + "images/task_089_episode_31_177_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_31", + "recording_id": "task_089_episode_31", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_089_episode_31_258_cam_high.jpg", + "images/task_089_episode_31_261_cam_high.jpg", + "images/task_089_episode_31_264_cam_high.jpg", + "images/task_089_episode_31_267_cam_high.jpg", + "images/task_089_episode_31_270_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_31", + "recording_id": "task_089_episode_31", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_089_episode_31_762_cam_high.jpg", + "images/task_089_episode_31_765_cam_high.jpg", + "images/task_089_episode_31_768_cam_high.jpg", + "images/task_089_episode_31_771_cam_high.jpg", + "images/task_089_episode_31_774_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_31", + "recording_id": "task_089_episode_31", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Frame Y happens before Frame X" + } + ], + "images": [ + "images/task_089_episode_31_575_cam_high.jpg", + "images/task_089_episode_31_144_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_31", + "recording_id": "task_089_episode_31", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Frame Y happens before Frame X" + } + ], + "images": [ + "images/task_089_episode_31_495_cam_high.jpg", + "images/task_089_episode_31_200_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_31", + "recording_id": "task_089_episode_31", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Frame X happens before Frame Y" + } + ], + "images": [ + "images/task_089_episode_31_280_cam_high.jpg", + "images/task_089_episode_31_655_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_31", + "recording_id": "task_089_episode_31", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_089_episode_31_378_cam_high.jpg", + "images/task_089_episode_31_463_cam_high.jpg", + "images/task_089_episode_31_421_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_31", + "recording_id": "task_089_episode_31", + "episode_id": null, + "camera": "cam_high", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_089_episode_31_378_cam_high.jpg", + "images/task_089_episode_31_336_cam_high.jpg", + "images/task_089_episode_31_294_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_31", + "recording_id": "task_089_episode_31", + "episode_id": null, + "camera": "cam_high", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_089_episode_31_674_cam_high.jpg", + "images/task_089_episode_31_632_cam_high.jpg", + "images/task_089_episode_31_717_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_31", + "recording_id": "task_089_episode_31", + "episode_id": null, + "camera": "cam_high", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_089_episode_31_209_cam_high.jpg", + "images/task_089_episode_31_167_cam_high.jpg", + "images/task_089_episode_31_251_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_31", + "recording_id": "task_089_episode_31", + "episode_id": null, + "camera": "cam_high", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_089_episode_31_505_cam_high.jpg", + "images/task_089_episode_31_590_cam_high.jpg", + "images/task_089_episode_31_548_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_31", + "recording_id": "task_089_episode_31", + "episode_id": null, + "camera": "cam_high", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_089_episode_31_336_cam_high.jpg", + "images/task_089_episode_31_378_cam_high.jpg", + "images/task_089_episode_31_421_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_31", + "recording_id": "task_089_episode_31", + "episode_id": null, + "camera": "cam_high", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_089_episode_31_125_cam_high.jpg", + "images/task_089_episode_31_82_cam_high.jpg", + "images/task_089_episode_31_167_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_31", + "recording_id": "task_089_episode_31", + "episode_id": null, + "camera": "cam_high", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_089_episode_31_548_cam_high.jpg", + "images/task_089_episode_31_463_cam_high.jpg", + "images/task_089_episode_31_505_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_31", + "recording_id": "task_089_episode_31", + "episode_id": null, + "camera": "cam_high", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_089_episode_31_590_cam_high.jpg", + "images/task_089_episode_31_632_cam_high.jpg", + "images/task_089_episode_31_548_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_31", + "recording_id": "task_089_episode_31", + "episode_id": null, + "camera": "cam_high", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_089_episode_31_294_cam_high.jpg", + "images/task_089_episode_31_251_cam_high.jpg", + "images/task_089_episode_31_336_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_31", + "recording_id": "task_089_episode_31", + "episode_id": null, + "camera": "cam_high", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_089_episode_31_421_cam_high.jpg", + "images/task_089_episode_31_463_cam_high.jpg", + "images/task_089_episode_31_505_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_31", + "recording_id": "task_089_episode_31", + "episode_id": null, + "camera": "cam_high", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Left\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Up" + } + ], + "images": [ + "images/task_089_episode_36_0_cam_high.jpg", + "images/task_089_episode_36_15_cam_high.jpg", + "images/task_089_episode_36_30_cam_high.jpg", + "images/task_089_episode_36_45_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_36", + "recording_id": "task_089_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Up" + } + ], + "images": [ + "images/task_089_episode_36_0_cam_high.jpg", + "images/task_089_episode_36_15_cam_high.jpg", + "images/task_089_episode_36_30_cam_high.jpg", + "images/task_089_episode_36_45_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_36", + "recording_id": "task_089_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Right\nC. Down\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Left" + } + ], + "images": [ + "images/task_089_episode_36_6_cam_high.jpg", + "images/task_089_episode_36_21_cam_high.jpg", + "images/task_089_episode_36_36_cam_high.jpg", + "images/task_089_episode_36_51_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_36", + "recording_id": "task_089_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Left\nC. Right\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Left" + } + ], + "images": [ + "images/task_089_episode_36_12_cam_high.jpg", + "images/task_089_episode_36_27_cam_high.jpg", + "images/task_089_episode_36_42_cam_high.jpg", + "images/task_089_episode_36_57_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_36", + "recording_id": "task_089_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Left\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Left" + } + ], + "images": [ + "images/task_089_episode_36_18_cam_high.jpg", + "images/task_089_episode_36_33_cam_high.jpg", + "images/task_089_episode_36_48_cam_high.jpg", + "images/task_089_episode_36_63_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_36", + "recording_id": "task_089_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Left\nC. Right\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Right" + } + ], + "images": [ + "images/task_089_episode_36_192_cam_high.jpg", + "images/task_089_episode_36_207_cam_high.jpg", + "images/task_089_episode_36_222_cam_high.jpg", + "images/task_089_episode_36_237_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_36", + "recording_id": "task_089_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Right" + } + ], + "images": [ + "images/task_089_episode_36_198_cam_high.jpg", + "images/task_089_episode_36_213_cam_high.jpg", + "images/task_089_episode_36_228_cam_high.jpg", + "images/task_089_episode_36_243_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_36", + "recording_id": "task_089_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Down\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Right" + } + ], + "images": [ + "images/task_089_episode_36_204_cam_high.jpg", + "images/task_089_episode_36_219_cam_high.jpg", + "images/task_089_episode_36_234_cam_high.jpg", + "images/task_089_episode_36_249_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_36", + "recording_id": "task_089_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Up\nC. Right\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Down" + } + ], + "images": [ + "images/task_089_episode_36_246_cam_high.jpg", + "images/task_089_episode_36_261_cam_high.jpg", + "images/task_089_episode_36_276_cam_high.jpg", + "images/task_089_episode_36_291_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_36", + "recording_id": "task_089_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Down" + } + ], + "images": [ + "images/task_089_episode_36_252_cam_high.jpg", + "images/task_089_episode_36_267_cam_high.jpg", + "images/task_089_episode_36_282_cam_high.jpg", + "images/task_089_episode_36_297_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_36", + "recording_id": "task_089_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Up\nC. Right\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Down" + } + ], + "images": [ + "images/task_089_episode_36_258_cam_high.jpg", + "images/task_089_episode_36_273_cam_high.jpg", + "images/task_089_episode_36_288_cam_high.jpg", + "images/task_089_episode_36_303_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_36", + "recording_id": "task_089_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Up\nC. Left\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Up" + } + ], + "images": [ + "images/task_089_episode_36_288_cam_high.jpg", + "images/task_089_episode_36_303_cam_high.jpg", + "images/task_089_episode_36_318_cam_high.jpg", + "images/task_089_episode_36_333_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_36", + "recording_id": "task_089_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Right\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Up" + } + ], + "images": [ + "images/task_089_episode_36_294_cam_high.jpg", + "images/task_089_episode_36_309_cam_high.jpg", + "images/task_089_episode_36_324_cam_high.jpg", + "images/task_089_episode_36_339_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_36", + "recording_id": "task_089_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Right\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Right" + } + ], + "images": [ + "images/task_089_episode_36_348_cam_high.jpg", + "images/task_089_episode_36_363_cam_high.jpg", + "images/task_089_episode_36_378_cam_high.jpg", + "images/task_089_episode_36_393_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_36", + "recording_id": "task_089_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Left\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Right" + } + ], + "images": [ + "images/task_089_episode_36_354_cam_high.jpg", + "images/task_089_episode_36_369_cam_high.jpg", + "images/task_089_episode_36_384_cam_high.jpg", + "images/task_089_episode_36_399_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_36", + "recording_id": "task_089_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Up\nC. Left\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Right" + } + ], + "images": [ + "images/task_089_episode_36_360_cam_high.jpg", + "images/task_089_episode_36_375_cam_high.jpg", + "images/task_089_episode_36_390_cam_high.jpg", + "images/task_089_episode_36_405_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_36", + "recording_id": "task_089_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Up\nC. Down\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Up" + } + ], + "images": [ + "images/task_089_episode_36_390_cam_high.jpg", + "images/task_089_episode_36_405_cam_high.jpg", + "images/task_089_episode_36_420_cam_high.jpg", + "images/task_089_episode_36_435_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_36", + "recording_id": "task_089_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Left\nC. Down\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Up" + } + ], + "images": [ + "images/task_089_episode_36_396_cam_high.jpg", + "images/task_089_episode_36_411_cam_high.jpg", + "images/task_089_episode_36_426_cam_high.jpg", + "images/task_089_episode_36_441_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_36", + "recording_id": "task_089_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Up\nC. Down\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Down" + } + ], + "images": [ + "images/task_089_episode_36_444_cam_high.jpg", + "images/task_089_episode_36_459_cam_high.jpg", + "images/task_089_episode_36_474_cam_high.jpg", + "images/task_089_episode_36_489_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_36", + "recording_id": "task_089_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Up\nC. Right\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Down" + } + ], + "images": [ + "images/task_089_episode_36_450_cam_high.jpg", + "images/task_089_episode_36_465_cam_high.jpg", + "images/task_089_episode_36_480_cam_high.jpg", + "images/task_089_episode_36_495_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_36", + "recording_id": "task_089_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Down\nC. Right\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Down" + } + ], + "images": [ + "images/task_089_episode_36_522_cam_high.jpg", + "images/task_089_episode_36_537_cam_high.jpg", + "images/task_089_episode_36_552_cam_high.jpg", + "images/task_089_episode_36_567_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_36", + "recording_id": "task_089_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Only the right arm is actively moving" + } + ], + "images": [ + "images/task_089_episode_36_94_cam_high.jpg", + "images/task_089_episode_36_109_cam_high.jpg", + "images/task_089_episode_36_124_cam_high.jpg", + "images/task_089_episode_36_139_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_36", + "recording_id": "task_089_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Only the left arm is actively moving" + } + ], + "images": [ + "images/task_089_episode_36_614_cam_high.jpg", + "images/task_089_episode_36_629_cam_high.jpg", + "images/task_089_episode_36_644_cam_high.jpg", + "images/task_089_episode_36_659_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_36", + "recording_id": "task_089_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Only the right arm is actively moving" + } + ], + "images": [ + "images/task_089_episode_36_302_cam_high.jpg", + "images/task_089_episode_36_317_cam_high.jpg", + "images/task_089_episode_36_332_cam_high.jpg", + "images/task_089_episode_36_347_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_36", + "recording_id": "task_089_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_089_episode_36_167_cam_high.jpg", + "images/task_089_episode_36_182_cam_high.jpg", + "images/task_089_episode_36_197_cam_high.jpg", + "images/task_089_episode_36_212_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_36", + "recording_id": "task_089_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_089_episode_36_208_cam_high.jpg", + "images/task_089_episode_36_223_cam_high.jpg", + "images/task_089_episode_36_238_cam_high.jpg", + "images/task_089_episode_36_253_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_36", + "recording_id": "task_089_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Only the right arm is actively moving" + } + ], + "images": [ + "images/task_089_episode_36_250_cam_high.jpg", + "images/task_089_episode_36_265_cam_high.jpg", + "images/task_089_episode_36_280_cam_high.jpg", + "images/task_089_episode_36_295_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_36", + "recording_id": "task_089_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Both arms are actively moving" + } + ], + "images": [ + "images/task_089_episode_36_73_cam_high.jpg", + "images/task_089_episode_36_88_cam_high.jpg", + "images/task_089_episode_36_103_cam_high.jpg", + "images/task_089_episode_36_118_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_36", + "recording_id": "task_089_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_089_episode_36_468_cam_high.jpg", + "images/task_089_episode_36_483_cam_high.jpg", + "images/task_089_episode_36_498_cam_high.jpg", + "images/task_089_episode_36_513_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_36", + "recording_id": "task_089_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Only the left arm is actively moving" + } + ], + "images": [ + "images/task_089_episode_36_510_cam_high.jpg", + "images/task_089_episode_36_525_cam_high.jpg", + "images/task_089_episode_36_540_cam_high.jpg", + "images/task_089_episode_36_555_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_36", + "recording_id": "task_089_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Both arms are actively moving" + } + ], + "images": [ + "images/task_089_episode_36_52_cam_high.jpg", + "images/task_089_episode_36_67_cam_high.jpg", + "images/task_089_episode_36_82_cam_high.jpg", + "images/task_089_episode_36_97_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_36", + "recording_id": "task_089_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Only the left arm is actively moving" + } + ], + "images": [ + "images/task_089_episode_36_396_cam_high.jpg", + "images/task_089_episode_36_411_cam_high.jpg", + "images/task_089_episode_36_426_cam_high.jpg", + "images/task_089_episode_36_441_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_36", + "recording_id": "task_089_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Both arms are actively moving" + } + ], + "images": [ + "images/task_089_episode_36_62_cam_high.jpg", + "images/task_089_episode_36_77_cam_high.jpg", + "images/task_089_episode_36_92_cam_high.jpg", + "images/task_089_episode_36_107_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_36", + "recording_id": "task_089_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_089_episode_36_417_cam_high.jpg", + "images/task_089_episode_36_420_cam_high.jpg", + "images/task_089_episode_36_423_cam_high.jpg", + "images/task_089_episode_36_426_cam_high.jpg", + "images/task_089_episode_36_429_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_36", + "recording_id": "task_089_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_089_episode_36_744_cam_high.jpg", + "images/task_089_episode_36_747_cam_high.jpg", + "images/task_089_episode_36_750_cam_high.jpg", + "images/task_089_episode_36_753_cam_high.jpg", + "images/task_089_episode_36_756_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_36", + "recording_id": "task_089_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_089_episode_36_323_cam_high.jpg", + "images/task_089_episode_36_326_cam_high.jpg", + "images/task_089_episode_36_329_cam_high.jpg", + "images/task_089_episode_36_332_cam_high.jpg", + "images/task_089_episode_36_335_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_36", + "recording_id": "task_089_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_089_episode_36_436_cam_high.jpg", + "images/task_089_episode_36_439_cam_high.jpg", + "images/task_089_episode_36_442_cam_high.jpg", + "images/task_089_episode_36_445_cam_high.jpg", + "images/task_089_episode_36_448_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_36", + "recording_id": "task_089_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_089_episode_36_277_cam_high.jpg", + "images/task_089_episode_36_280_cam_high.jpg", + "images/task_089_episode_36_283_cam_high.jpg", + "images/task_089_episode_36_286_cam_high.jpg", + "images/task_089_episode_36_289_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_36", + "recording_id": "task_089_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_089_episode_36_520_cam_high.jpg", + "images/task_089_episode_36_523_cam_high.jpg", + "images/task_089_episode_36_526_cam_high.jpg", + "images/task_089_episode_36_529_cam_high.jpg", + "images/task_089_episode_36_532_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_36", + "recording_id": "task_089_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_089_episode_36_342_cam_high.jpg", + "images/task_089_episode_36_345_cam_high.jpg", + "images/task_089_episode_36_348_cam_high.jpg", + "images/task_089_episode_36_351_cam_high.jpg", + "images/task_089_episode_36_354_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_36", + "recording_id": "task_089_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_089_episode_36_146_cam_high.jpg", + "images/task_089_episode_36_149_cam_high.jpg", + "images/task_089_episode_36_152_cam_high.jpg", + "images/task_089_episode_36_155_cam_high.jpg", + "images/task_089_episode_36_158_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_36", + "recording_id": "task_089_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Frame X happens before Frame Y" + } + ], + "images": [ + "images/task_089_episode_36_144_cam_high.jpg", + "images/task_089_episode_36_575_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_36", + "recording_id": "task_089_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Frame X happens before Frame Y" + } + ], + "images": [ + "images/task_089_episode_36_200_cam_high.jpg", + "images/task_089_episode_36_495_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_36", + "recording_id": "task_089_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Frame X happens before Frame Y" + } + ], + "images": [ + "images/task_089_episode_36_280_cam_high.jpg", + "images/task_089_episode_36_655_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_36", + "recording_id": "task_089_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_089_episode_36_505_cam_high.jpg", + "images/task_089_episode_36_463_cam_high.jpg", + "images/task_089_episode_36_421_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_36", + "recording_id": "task_089_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_089_episode_36_251_cam_high.jpg", + "images/task_089_episode_36_209_cam_high.jpg", + "images/task_089_episode_36_167_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_36", + "recording_id": "task_089_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_089_episode_36_759_cam_high.jpg", + "images/task_089_episode_36_674_cam_high.jpg", + "images/task_089_episode_36_717_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_36", + "recording_id": "task_089_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_089_episode_36_82_cam_high.jpg", + "images/task_089_episode_36_125_cam_high.jpg", + "images/task_089_episode_36_167_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_36", + "recording_id": "task_089_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_089_episode_36_590_cam_high.jpg", + "images/task_089_episode_36_548_cam_high.jpg", + "images/task_089_episode_36_632_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_36", + "recording_id": "task_089_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_089_episode_36_421_cam_high.jpg", + "images/task_089_episode_36_463_cam_high.jpg", + "images/task_089_episode_36_378_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_36", + "recording_id": "task_089_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_089_episode_36_294_cam_high.jpg", + "images/task_089_episode_36_336_cam_high.jpg", + "images/task_089_episode_36_378_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_36", + "recording_id": "task_089_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_089_episode_36_548_cam_high.jpg", + "images/task_089_episode_36_505_cam_high.jpg", + "images/task_089_episode_36_590_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_36", + "recording_id": "task_089_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_089_episode_36_463_cam_high.jpg", + "images/task_089_episode_36_505_cam_high.jpg", + "images/task_089_episode_36_548_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_36", + "recording_id": "task_089_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_089_episode_36_167_cam_high.jpg", + "images/task_089_episode_36_209_cam_high.jpg", + "images/task_089_episode_36_125_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_36", + "recording_id": "task_089_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_089_episode_36_632_cam_high.jpg", + "images/task_089_episode_36_590_cam_high.jpg", + "images/task_089_episode_36_674_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_36", + "recording_id": "task_089_episode_36", + "episode_id": null, + "camera": "cam_high", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Down\nC. Up\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Right" + } + ], + "images": [ + "images/task_089_episode_42_0_cam_high.jpg", + "images/task_089_episode_42_15_cam_high.jpg", + "images/task_089_episode_42_30_cam_high.jpg", + "images/task_089_episode_42_45_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_42", + "recording_id": "task_089_episode_42", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Left\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Up" + } + ], + "images": [ + "images/task_089_episode_42_0_cam_high.jpg", + "images/task_089_episode_42_15_cam_high.jpg", + "images/task_089_episode_42_30_cam_high.jpg", + "images/task_089_episode_42_45_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_42", + "recording_id": "task_089_episode_42", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Left" + } + ], + "images": [ + "images/task_089_episode_42_6_cam_high.jpg", + "images/task_089_episode_42_21_cam_high.jpg", + "images/task_089_episode_42_36_cam_high.jpg", + "images/task_089_episode_42_51_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_42", + "recording_id": "task_089_episode_42", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Left\nC. Down\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Left" + } + ], + "images": [ + "images/task_089_episode_42_12_cam_high.jpg", + "images/task_089_episode_42_27_cam_high.jpg", + "images/task_089_episode_42_42_cam_high.jpg", + "images/task_089_episode_42_57_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_42", + "recording_id": "task_089_episode_42", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Left\nC. Up\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Left" + } + ], + "images": [ + "images/task_089_episode_42_18_cam_high.jpg", + "images/task_089_episode_42_33_cam_high.jpg", + "images/task_089_episode_42_48_cam_high.jpg", + "images/task_089_episode_42_63_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_42", + "recording_id": "task_089_episode_42", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Right\nC. Down\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Right" + } + ], + "images": [ + "images/task_089_episode_42_198_cam_high.jpg", + "images/task_089_episode_42_213_cam_high.jpg", + "images/task_089_episode_42_228_cam_high.jpg", + "images/task_089_episode_42_243_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_42", + "recording_id": "task_089_episode_42", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Left\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Right" + } + ], + "images": [ + "images/task_089_episode_42_204_cam_high.jpg", + "images/task_089_episode_42_219_cam_high.jpg", + "images/task_089_episode_42_234_cam_high.jpg", + "images/task_089_episode_42_249_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_42", + "recording_id": "task_089_episode_42", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Left\nC. Up\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Right" + } + ], + "images": [ + "images/task_089_episode_42_210_cam_high.jpg", + "images/task_089_episode_42_225_cam_high.jpg", + "images/task_089_episode_42_240_cam_high.jpg", + "images/task_089_episode_42_255_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_42", + "recording_id": "task_089_episode_42", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Left\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Down" + } + ], + "images": [ + "images/task_089_episode_42_246_cam_high.jpg", + "images/task_089_episode_42_261_cam_high.jpg", + "images/task_089_episode_42_276_cam_high.jpg", + "images/task_089_episode_42_291_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_42", + "recording_id": "task_089_episode_42", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Left\nC. Right\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Down" + } + ], + "images": [ + "images/task_089_episode_42_252_cam_high.jpg", + "images/task_089_episode_42_267_cam_high.jpg", + "images/task_089_episode_42_282_cam_high.jpg", + "images/task_089_episode_42_297_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_42", + "recording_id": "task_089_episode_42", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Up\nC. Right\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Down" + } + ], + "images": [ + "images/task_089_episode_42_258_cam_high.jpg", + "images/task_089_episode_42_273_cam_high.jpg", + "images/task_089_episode_42_288_cam_high.jpg", + "images/task_089_episode_42_303_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_42", + "recording_id": "task_089_episode_42", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Right\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Up" + } + ], + "images": [ + "images/task_089_episode_42_288_cam_high.jpg", + "images/task_089_episode_42_303_cam_high.jpg", + "images/task_089_episode_42_318_cam_high.jpg", + "images/task_089_episode_42_333_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_42", + "recording_id": "task_089_episode_42", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Down\nC. Right\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Up" + } + ], + "images": [ + "images/task_089_episode_42_294_cam_high.jpg", + "images/task_089_episode_42_309_cam_high.jpg", + "images/task_089_episode_42_324_cam_high.jpg", + "images/task_089_episode_42_339_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_42", + "recording_id": "task_089_episode_42", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Down\nC. Right\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Right" + } + ], + "images": [ + "images/task_089_episode_42_336_cam_high.jpg", + "images/task_089_episode_42_351_cam_high.jpg", + "images/task_089_episode_42_366_cam_high.jpg", + "images/task_089_episode_42_381_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_42", + "recording_id": "task_089_episode_42", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Left\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Right" + } + ], + "images": [ + "images/task_089_episode_42_342_cam_high.jpg", + "images/task_089_episode_42_357_cam_high.jpg", + "images/task_089_episode_42_372_cam_high.jpg", + "images/task_089_episode_42_387_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_42", + "recording_id": "task_089_episode_42", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Left\nC. Down\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Up" + } + ], + "images": [ + "images/task_089_episode_42_378_cam_high.jpg", + "images/task_089_episode_42_393_cam_high.jpg", + "images/task_089_episode_42_408_cam_high.jpg", + "images/task_089_episode_42_423_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_42", + "recording_id": "task_089_episode_42", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Left\nC. Up\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Up" + } + ], + "images": [ + "images/task_089_episode_42_384_cam_high.jpg", + "images/task_089_episode_42_399_cam_high.jpg", + "images/task_089_episode_42_414_cam_high.jpg", + "images/task_089_episode_42_429_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_42", + "recording_id": "task_089_episode_42", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Left\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Up" + } + ], + "images": [ + "images/task_089_episode_42_390_cam_high.jpg", + "images/task_089_episode_42_405_cam_high.jpg", + "images/task_089_episode_42_420_cam_high.jpg", + "images/task_089_episode_42_435_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_42", + "recording_id": "task_089_episode_42", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Right\nC. Left\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Down" + } + ], + "images": [ + "images/task_089_episode_42_432_cam_high.jpg", + "images/task_089_episode_42_447_cam_high.jpg", + "images/task_089_episode_42_462_cam_high.jpg", + "images/task_089_episode_42_477_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_42", + "recording_id": "task_089_episode_42", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Right\nC. Left\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Down" + } + ], + "images": [ + "images/task_089_episode_42_438_cam_high.jpg", + "images/task_089_episode_42_453_cam_high.jpg", + "images/task_089_episode_42_468_cam_high.jpg", + "images/task_089_episode_42_483_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_42", + "recording_id": "task_089_episode_42", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Down\nC. Up\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Down" + } + ], + "images": [ + "images/task_089_episode_42_444_cam_high.jpg", + "images/task_089_episode_42_459_cam_high.jpg", + "images/task_089_episode_42_474_cam_high.jpg", + "images/task_089_episode_42_489_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_42", + "recording_id": "task_089_episode_42", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Only the left arm is actively moving" + } + ], + "images": [ + "images/task_089_episode_42_375_cam_high.jpg", + "images/task_089_episode_42_390_cam_high.jpg", + "images/task_089_episode_42_405_cam_high.jpg", + "images/task_089_episode_42_420_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_42", + "recording_id": "task_089_episode_42", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_089_episode_42_448_cam_high.jpg", + "images/task_089_episode_42_463_cam_high.jpg", + "images/task_089_episode_42_478_cam_high.jpg", + "images/task_089_episode_42_493_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_42", + "recording_id": "task_089_episode_42", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_089_episode_42_167_cam_high.jpg", + "images/task_089_episode_42_182_cam_high.jpg", + "images/task_089_episode_42_197_cam_high.jpg", + "images/task_089_episode_42_212_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_42", + "recording_id": "task_089_episode_42", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_089_episode_42_708_cam_high.jpg", + "images/task_089_episode_42_723_cam_high.jpg", + "images/task_089_episode_42_738_cam_high.jpg", + "images/task_089_episode_42_753_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_42", + "recording_id": "task_089_episode_42", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Only the right arm is actively moving" + } + ], + "images": [ + "images/task_089_episode_42_302_cam_high.jpg", + "images/task_089_episode_42_317_cam_high.jpg", + "images/task_089_episode_42_332_cam_high.jpg", + "images/task_089_episode_42_347_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_42", + "recording_id": "task_089_episode_42", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Only the right arm is actively moving" + } + ], + "images": [ + "images/task_089_episode_42_208_cam_high.jpg", + "images/task_089_episode_42_223_cam_high.jpg", + "images/task_089_episode_42_238_cam_high.jpg", + "images/task_089_episode_42_253_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_42", + "recording_id": "task_089_episode_42", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Both arms are actively moving" + } + ], + "images": [ + "images/task_089_episode_42_94_cam_high.jpg", + "images/task_089_episode_42_109_cam_high.jpg", + "images/task_089_episode_42_124_cam_high.jpg", + "images/task_089_episode_42_139_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_42", + "recording_id": "task_089_episode_42", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Only the right arm is actively moving" + } + ], + "images": [ + "images/task_089_episode_42_239_cam_high.jpg", + "images/task_089_episode_42_254_cam_high.jpg", + "images/task_089_episode_42_269_cam_high.jpg", + "images/task_089_episode_42_284_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_42", + "recording_id": "task_089_episode_42", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Both arms are actively moving" + } + ], + "images": [ + "images/task_089_episode_42_10_cam_high.jpg", + "images/task_089_episode_42_25_cam_high.jpg", + "images/task_089_episode_42_40_cam_high.jpg", + "images/task_089_episode_42_55_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_42", + "recording_id": "task_089_episode_42", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Only the left arm is actively moving" + } + ], + "images": [ + "images/task_089_episode_42_520_cam_high.jpg", + "images/task_089_episode_42_535_cam_high.jpg", + "images/task_089_episode_42_550_cam_high.jpg", + "images/task_089_episode_42_565_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_42", + "recording_id": "task_089_episode_42", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Only the left arm is actively moving" + } + ], + "images": [ + "images/task_089_episode_42_416_cam_high.jpg", + "images/task_089_episode_42_431_cam_high.jpg", + "images/task_089_episode_42_446_cam_high.jpg", + "images/task_089_episode_42_461_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_42", + "recording_id": "task_089_episode_42", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_089_episode_42_15_cam_high.jpg", + "images/task_089_episode_42_18_cam_high.jpg", + "images/task_089_episode_42_21_cam_high.jpg", + "images/task_089_episode_42_24_cam_high.jpg", + "images/task_089_episode_42_27_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_42", + "recording_id": "task_089_episode_42", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_089_episode_42_641_cam_high.jpg", + "images/task_089_episode_42_644_cam_high.jpg", + "images/task_089_episode_42_647_cam_high.jpg", + "images/task_089_episode_42_650_cam_high.jpg", + "images/task_089_episode_42_653_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_42", + "recording_id": "task_089_episode_42", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_089_episode_42_277_cam_high.jpg", + "images/task_089_episode_42_280_cam_high.jpg", + "images/task_089_episode_42_283_cam_high.jpg", + "images/task_089_episode_42_286_cam_high.jpg", + "images/task_089_episode_42_289_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_42", + "recording_id": "task_089_episode_42", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_089_episode_42_585_cam_high.jpg", + "images/task_089_episode_42_588_cam_high.jpg", + "images/task_089_episode_42_591_cam_high.jpg", + "images/task_089_episode_42_594_cam_high.jpg", + "images/task_089_episode_42_597_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_42", + "recording_id": "task_089_episode_42", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_089_episode_42_716_cam_high.jpg", + "images/task_089_episode_42_719_cam_high.jpg", + "images/task_089_episode_42_722_cam_high.jpg", + "images/task_089_episode_42_725_cam_high.jpg", + "images/task_089_episode_42_728_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_42", + "recording_id": "task_089_episode_42", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_089_episode_42_370_cam_high.jpg", + "images/task_089_episode_42_373_cam_high.jpg", + "images/task_089_episode_42_376_cam_high.jpg", + "images/task_089_episode_42_379_cam_high.jpg", + "images/task_089_episode_42_382_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_42", + "recording_id": "task_089_episode_42", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_089_episode_42_15_cam_high.jpg", + "images/task_089_episode_42_18_cam_high.jpg", + "images/task_089_episode_42_21_cam_high.jpg", + "images/task_089_episode_42_24_cam_high.jpg", + "images/task_089_episode_42_27_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_42", + "recording_id": "task_089_episode_42", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_089_episode_42_314_cam_high.jpg", + "images/task_089_episode_42_317_cam_high.jpg", + "images/task_089_episode_42_320_cam_high.jpg", + "images/task_089_episode_42_323_cam_high.jpg", + "images/task_089_episode_42_326_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_42", + "recording_id": "task_089_episode_42", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Frame Y happens before Frame X" + } + ], + "images": [ + "images/task_089_episode_42_575_cam_high.jpg", + "images/task_089_episode_42_144_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_42", + "recording_id": "task_089_episode_42", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Frame X happens before Frame Y" + } + ], + "images": [ + "images/task_089_episode_42_200_cam_high.jpg", + "images/task_089_episode_42_495_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_42", + "recording_id": "task_089_episode_42", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Frame Y happens before Frame X" + } + ], + "images": [ + "images/task_089_episode_42_655_cam_high.jpg", + "images/task_089_episode_42_280_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_42", + "recording_id": "task_089_episode_42", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_089_episode_42_632_cam_high.jpg", + "images/task_089_episode_42_548_cam_high.jpg", + "images/task_089_episode_42_590_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_42", + "recording_id": "task_089_episode_42", + "episode_id": null, + "camera": "cam_high", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_089_episode_42_336_cam_high.jpg", + "images/task_089_episode_42_378_cam_high.jpg", + "images/task_089_episode_42_421_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_42", + "recording_id": "task_089_episode_42", + "episode_id": null, + "camera": "cam_high", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_089_episode_42_209_cam_high.jpg", + "images/task_089_episode_42_294_cam_high.jpg", + "images/task_089_episode_42_251_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_42", + "recording_id": "task_089_episode_42", + "episode_id": null, + "camera": "cam_high", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_089_episode_42_336_cam_high.jpg", + "images/task_089_episode_42_251_cam_high.jpg", + "images/task_089_episode_42_294_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_42", + "recording_id": "task_089_episode_42", + "episode_id": null, + "camera": "cam_high", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_089_episode_42_717_cam_high.jpg", + "images/task_089_episode_42_759_cam_high.jpg", + "images/task_089_episode_42_674_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_42", + "recording_id": "task_089_episode_42", + "episode_id": null, + "camera": "cam_high", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_089_episode_42_167_cam_high.jpg", + "images/task_089_episode_42_209_cam_high.jpg", + "images/task_089_episode_42_125_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_42", + "recording_id": "task_089_episode_42", + "episode_id": null, + "camera": "cam_high", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_089_episode_42_336_cam_high.jpg", + "images/task_089_episode_42_294_cam_high.jpg", + "images/task_089_episode_42_378_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_42", + "recording_id": "task_089_episode_42", + "episode_id": null, + "camera": "cam_high", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_089_episode_42_125_cam_high.jpg", + "images/task_089_episode_42_40_cam_high.jpg", + "images/task_089_episode_42_82_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_42", + "recording_id": "task_089_episode_42", + "episode_id": null, + "camera": "cam_high", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_089_episode_42_82_cam_high.jpg", + "images/task_089_episode_42_125_cam_high.jpg", + "images/task_089_episode_42_167_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_42", + "recording_id": "task_089_episode_42", + "episode_id": null, + "camera": "cam_high", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_089_episode_42_590_cam_high.jpg", + "images/task_089_episode_42_674_cam_high.jpg", + "images/task_089_episode_42_632_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_42", + "recording_id": "task_089_episode_42", + "episode_id": null, + "camera": "cam_high", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_089_episode_42_717_cam_high.jpg", + "images/task_089_episode_42_674_cam_high.jpg", + "images/task_089_episode_42_632_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_42", + "recording_id": "task_089_episode_42", + "episode_id": null, + "camera": "cam_high", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_089_episode_42_251_cam_high.jpg", + "images/task_089_episode_42_167_cam_high.jpg", + "images/task_089_episode_42_209_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_42", + "recording_id": "task_089_episode_42", + "episode_id": null, + "camera": "cam_high", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_089_episode_42_505_cam_high.jpg", + "images/task_089_episode_42_421_cam_high.jpg", + "images/task_089_episode_42_463_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_42", + "recording_id": "task_089_episode_42", + "episode_id": null, + "camera": "cam_high", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Up\nC. Left\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Right" + } + ], + "images": [ + "images/task_089_episode_52_0_cam_high.jpg", + "images/task_089_episode_52_15_cam_high.jpg", + "images/task_089_episode_52_30_cam_high.jpg", + "images/task_089_episode_52_45_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_52", + "recording_id": "task_089_episode_52", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Left\nC. Up\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Up" + } + ], + "images": [ + "images/task_089_episode_52_0_cam_high.jpg", + "images/task_089_episode_52_15_cam_high.jpg", + "images/task_089_episode_52_30_cam_high.jpg", + "images/task_089_episode_52_45_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_52", + "recording_id": "task_089_episode_52", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Left\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Up" + } + ], + "images": [ + "images/task_089_episode_52_6_cam_high.jpg", + "images/task_089_episode_52_21_cam_high.jpg", + "images/task_089_episode_52_36_cam_high.jpg", + "images/task_089_episode_52_51_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_52", + "recording_id": "task_089_episode_52", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Up\nC. Right\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Left" + } + ], + "images": [ + "images/task_089_episode_52_12_cam_high.jpg", + "images/task_089_episode_52_27_cam_high.jpg", + "images/task_089_episode_52_42_cam_high.jpg", + "images/task_089_episode_52_57_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_52", + "recording_id": "task_089_episode_52", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Up\nC. Left\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Left" + } + ], + "images": [ + "images/task_089_episode_52_18_cam_high.jpg", + "images/task_089_episode_52_33_cam_high.jpg", + "images/task_089_episode_52_48_cam_high.jpg", + "images/task_089_episode_52_63_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_52", + "recording_id": "task_089_episode_52", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Down\nC. Up\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Left" + } + ], + "images": [ + "images/task_089_episode_52_24_cam_high.jpg", + "images/task_089_episode_52_39_cam_high.jpg", + "images/task_089_episode_52_54_cam_high.jpg", + "images/task_089_episode_52_69_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_52", + "recording_id": "task_089_episode_52", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Down\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Right" + } + ], + "images": [ + "images/task_089_episode_52_186_cam_high.jpg", + "images/task_089_episode_52_201_cam_high.jpg", + "images/task_089_episode_52_216_cam_high.jpg", + "images/task_089_episode_52_231_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_52", + "recording_id": "task_089_episode_52", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Left\nC. Down\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Right" + } + ], + "images": [ + "images/task_089_episode_52_192_cam_high.jpg", + "images/task_089_episode_52_207_cam_high.jpg", + "images/task_089_episode_52_222_cam_high.jpg", + "images/task_089_episode_52_237_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_52", + "recording_id": "task_089_episode_52", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Left\nC. Down\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Right" + } + ], + "images": [ + "images/task_089_episode_52_198_cam_high.jpg", + "images/task_089_episode_52_213_cam_high.jpg", + "images/task_089_episode_52_228_cam_high.jpg", + "images/task_089_episode_52_243_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_52", + "recording_id": "task_089_episode_52", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Right\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Down" + } + ], + "images": [ + "images/task_089_episode_52_228_cam_high.jpg", + "images/task_089_episode_52_243_cam_high.jpg", + "images/task_089_episode_52_258_cam_high.jpg", + "images/task_089_episode_52_273_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_52", + "recording_id": "task_089_episode_52", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Left\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Down" + } + ], + "images": [ + "images/task_089_episode_52_234_cam_high.jpg", + "images/task_089_episode_52_249_cam_high.jpg", + "images/task_089_episode_52_264_cam_high.jpg", + "images/task_089_episode_52_279_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_52", + "recording_id": "task_089_episode_52", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Right\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Down" + } + ], + "images": [ + "images/task_089_episode_52_240_cam_high.jpg", + "images/task_089_episode_52_255_cam_high.jpg", + "images/task_089_episode_52_270_cam_high.jpg", + "images/task_089_episode_52_285_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_52", + "recording_id": "task_089_episode_52", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Up\nC. Down\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Up" + } + ], + "images": [ + "images/task_089_episode_52_270_cam_high.jpg", + "images/task_089_episode_52_285_cam_high.jpg", + "images/task_089_episode_52_300_cam_high.jpg", + "images/task_089_episode_52_315_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_52", + "recording_id": "task_089_episode_52", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Down\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Up" + } + ], + "images": [ + "images/task_089_episode_52_276_cam_high.jpg", + "images/task_089_episode_52_291_cam_high.jpg", + "images/task_089_episode_52_306_cam_high.jpg", + "images/task_089_episode_52_321_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_52", + "recording_id": "task_089_episode_52", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Down\nC. Left\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Right" + } + ], + "images": [ + "images/task_089_episode_52_318_cam_high.jpg", + "images/task_089_episode_52_333_cam_high.jpg", + "images/task_089_episode_52_348_cam_high.jpg", + "images/task_089_episode_52_363_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_52", + "recording_id": "task_089_episode_52", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Left\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Right" + } + ], + "images": [ + "images/task_089_episode_52_324_cam_high.jpg", + "images/task_089_episode_52_339_cam_high.jpg", + "images/task_089_episode_52_354_cam_high.jpg", + "images/task_089_episode_52_369_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_52", + "recording_id": "task_089_episode_52", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Up" + } + ], + "images": [ + "images/task_089_episode_52_366_cam_high.jpg", + "images/task_089_episode_52_381_cam_high.jpg", + "images/task_089_episode_52_396_cam_high.jpg", + "images/task_089_episode_52_411_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_52", + "recording_id": "task_089_episode_52", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Left\nC. Up\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Up" + } + ], + "images": [ + "images/task_089_episode_52_372_cam_high.jpg", + "images/task_089_episode_52_387_cam_high.jpg", + "images/task_089_episode_52_402_cam_high.jpg", + "images/task_089_episode_52_417_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_52", + "recording_id": "task_089_episode_52", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Down\nC. Up\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Down" + } + ], + "images": [ + "images/task_089_episode_52_420_cam_high.jpg", + "images/task_089_episode_52_435_cam_high.jpg", + "images/task_089_episode_52_450_cam_high.jpg", + "images/task_089_episode_52_465_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_52", + "recording_id": "task_089_episode_52", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Down\nC. Right\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Down" + } + ], + "images": [ + "images/task_089_episode_52_426_cam_high.jpg", + "images/task_089_episode_52_441_cam_high.jpg", + "images/task_089_episode_52_456_cam_high.jpg", + "images/task_089_episode_52_471_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_52", + "recording_id": "task_089_episode_52", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Down\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Down" + } + ], + "images": [ + "images/task_089_episode_52_432_cam_high.jpg", + "images/task_089_episode_52_447_cam_high.jpg", + "images/task_089_episode_52_462_cam_high.jpg", + "images/task_089_episode_52_477_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_52", + "recording_id": "task_089_episode_52", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_089_episode_52_687_cam_high.jpg", + "images/task_089_episode_52_702_cam_high.jpg", + "images/task_089_episode_52_717_cam_high.jpg", + "images/task_089_episode_52_732_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_52", + "recording_id": "task_089_episode_52", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_089_episode_52_666_cam_high.jpg", + "images/task_089_episode_52_681_cam_high.jpg", + "images/task_089_episode_52_696_cam_high.jpg", + "images/task_089_episode_52_711_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_52", + "recording_id": "task_089_episode_52", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Both arms are actively moving" + } + ], + "images": [ + "images/task_089_episode_52_31_cam_high.jpg", + "images/task_089_episode_52_46_cam_high.jpg", + "images/task_089_episode_52_61_cam_high.jpg", + "images/task_089_episode_52_76_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_52", + "recording_id": "task_089_episode_52", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Only the left arm is actively moving" + } + ], + "images": [ + "images/task_089_episode_52_500_cam_high.jpg", + "images/task_089_episode_52_515_cam_high.jpg", + "images/task_089_episode_52_530_cam_high.jpg", + "images/task_089_episode_52_545_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_52", + "recording_id": "task_089_episode_52", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_089_episode_52_718_cam_high.jpg", + "images/task_089_episode_52_733_cam_high.jpg", + "images/task_089_episode_52_748_cam_high.jpg", + "images/task_089_episode_52_763_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_52", + "recording_id": "task_089_episode_52", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Only the left arm is actively moving" + } + ], + "images": [ + "images/task_089_episode_52_364_cam_high.jpg", + "images/task_089_episode_52_379_cam_high.jpg", + "images/task_089_episode_52_394_cam_high.jpg", + "images/task_089_episode_52_409_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_52", + "recording_id": "task_089_episode_52", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Only the right arm is actively moving" + } + ], + "images": [ + "images/task_089_episode_52_250_cam_high.jpg", + "images/task_089_episode_52_265_cam_high.jpg", + "images/task_089_episode_52_280_cam_high.jpg", + "images/task_089_episode_52_295_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_52", + "recording_id": "task_089_episode_52", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Both arms are actively moving" + } + ], + "images": [ + "images/task_089_episode_52_0_cam_high.jpg", + "images/task_089_episode_52_15_cam_high.jpg", + "images/task_089_episode_52_30_cam_high.jpg", + "images/task_089_episode_52_45_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_52", + "recording_id": "task_089_episode_52", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Only the right arm is actively moving" + } + ], + "images": [ + "images/task_089_episode_52_291_cam_high.jpg", + "images/task_089_episode_52_306_cam_high.jpg", + "images/task_089_episode_52_321_cam_high.jpg", + "images/task_089_episode_52_336_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_52", + "recording_id": "task_089_episode_52", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Only the left arm is actively moving" + } + ], + "images": [ + "images/task_089_episode_52_552_cam_high.jpg", + "images/task_089_episode_52_567_cam_high.jpg", + "images/task_089_episode_52_582_cam_high.jpg", + "images/task_089_episode_52_597_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_52", + "recording_id": "task_089_episode_52", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Only the right arm is actively moving" + } + ], + "images": [ + "images/task_089_episode_52_281_cam_high.jpg", + "images/task_089_episode_52_296_cam_high.jpg", + "images/task_089_episode_52_311_cam_high.jpg", + "images/task_089_episode_52_326_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_52", + "recording_id": "task_089_episode_52", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Both arms are actively moving" + } + ], + "images": [ + "images/task_089_episode_52_21_cam_high.jpg", + "images/task_089_episode_52_36_cam_high.jpg", + "images/task_089_episode_52_51_cam_high.jpg", + "images/task_089_episode_52_66_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_52", + "recording_id": "task_089_episode_52", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_089_episode_52_650_cam_high.jpg", + "images/task_089_episode_52_653_cam_high.jpg", + "images/task_089_episode_52_656_cam_high.jpg", + "images/task_089_episode_52_659_cam_high.jpg", + "images/task_089_episode_52_662_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_52", + "recording_id": "task_089_episode_52", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_089_episode_52_323_cam_high.jpg", + "images/task_089_episode_52_326_cam_high.jpg", + "images/task_089_episode_52_329_cam_high.jpg", + "images/task_089_episode_52_332_cam_high.jpg", + "images/task_089_episode_52_335_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_52", + "recording_id": "task_089_episode_52", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_089_episode_52_90_cam_high.jpg", + "images/task_089_episode_52_93_cam_high.jpg", + "images/task_089_episode_52_96_cam_high.jpg", + "images/task_089_episode_52_99_cam_high.jpg", + "images/task_089_episode_52_102_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_52", + "recording_id": "task_089_episode_52", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_089_episode_52_501_cam_high.jpg", + "images/task_089_episode_52_504_cam_high.jpg", + "images/task_089_episode_52_507_cam_high.jpg", + "images/task_089_episode_52_510_cam_high.jpg", + "images/task_089_episode_52_513_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_52", + "recording_id": "task_089_episode_52", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_089_episode_52_71_cam_high.jpg", + "images/task_089_episode_52_74_cam_high.jpg", + "images/task_089_episode_52_77_cam_high.jpg", + "images/task_089_episode_52_80_cam_high.jpg", + "images/task_089_episode_52_83_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_52", + "recording_id": "task_089_episode_52", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_089_episode_52_202_cam_high.jpg", + "images/task_089_episode_52_205_cam_high.jpg", + "images/task_089_episode_52_208_cam_high.jpg", + "images/task_089_episode_52_211_cam_high.jpg", + "images/task_089_episode_52_214_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_52", + "recording_id": "task_089_episode_52", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_089_episode_52_725_cam_high.jpg", + "images/task_089_episode_52_728_cam_high.jpg", + "images/task_089_episode_52_731_cam_high.jpg", + "images/task_089_episode_52_734_cam_high.jpg", + "images/task_089_episode_52_737_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_52", + "recording_id": "task_089_episode_52", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_089_episode_52_81_cam_high.jpg", + "images/task_089_episode_52_84_cam_high.jpg", + "images/task_089_episode_52_87_cam_high.jpg", + "images/task_089_episode_52_90_cam_high.jpg", + "images/task_089_episode_52_93_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_52", + "recording_id": "task_089_episode_52", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Frame Y happens before Frame X" + } + ], + "images": [ + "images/task_089_episode_52_575_cam_high.jpg", + "images/task_089_episode_52_144_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_52", + "recording_id": "task_089_episode_52", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Frame Y happens before Frame X" + } + ], + "images": [ + "images/task_089_episode_52_495_cam_high.jpg", + "images/task_089_episode_52_200_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_52", + "recording_id": "task_089_episode_52", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Frame X happens before Frame Y" + } + ], + "images": [ + "images/task_089_episode_52_280_cam_high.jpg", + "images/task_089_episode_52_655_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_52", + "recording_id": "task_089_episode_52", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_089_episode_52_505_cam_high.jpg", + "images/task_089_episode_52_421_cam_high.jpg", + "images/task_089_episode_52_463_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_52", + "recording_id": "task_089_episode_52", + "episode_id": null, + "camera": "cam_high", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_089_episode_52_548_cam_high.jpg", + "images/task_089_episode_52_463_cam_high.jpg", + "images/task_089_episode_52_505_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_52", + "recording_id": "task_089_episode_52", + "episode_id": null, + "camera": "cam_high", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_089_episode_52_209_cam_high.jpg", + "images/task_089_episode_52_251_cam_high.jpg", + "images/task_089_episode_52_294_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_52", + "recording_id": "task_089_episode_52", + "episode_id": null, + "camera": "cam_high", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_089_episode_52_632_cam_high.jpg", + "images/task_089_episode_52_548_cam_high.jpg", + "images/task_089_episode_52_590_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_52", + "recording_id": "task_089_episode_52", + "episode_id": null, + "camera": "cam_high", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_089_episode_52_82_cam_high.jpg", + "images/task_089_episode_52_125_cam_high.jpg", + "images/task_089_episode_52_167_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_52", + "recording_id": "task_089_episode_52", + "episode_id": null, + "camera": "cam_high", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_089_episode_52_209_cam_high.jpg", + "images/task_089_episode_52_167_cam_high.jpg", + "images/task_089_episode_52_251_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_52", + "recording_id": "task_089_episode_52", + "episode_id": null, + "camera": "cam_high", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_089_episode_52_251_cam_high.jpg", + "images/task_089_episode_52_294_cam_high.jpg", + "images/task_089_episode_52_336_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_52", + "recording_id": "task_089_episode_52", + "episode_id": null, + "camera": "cam_high", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_089_episode_52_336_cam_high.jpg", + "images/task_089_episode_52_294_cam_high.jpg", + "images/task_089_episode_52_378_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_52", + "recording_id": "task_089_episode_52", + "episode_id": null, + "camera": "cam_high", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_089_episode_52_125_cam_high.jpg", + "images/task_089_episode_52_40_cam_high.jpg", + "images/task_089_episode_52_82_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_52", + "recording_id": "task_089_episode_52", + "episode_id": null, + "camera": "cam_high", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_089_episode_52_674_cam_high.jpg", + "images/task_089_episode_52_717_cam_high.jpg", + "images/task_089_episode_52_759_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_52", + "recording_id": "task_089_episode_52", + "episode_id": null, + "camera": "cam_high", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_089_episode_52_632_cam_high.jpg", + "images/task_089_episode_52_717_cam_high.jpg", + "images/task_089_episode_52_674_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_52", + "recording_id": "task_089_episode_52", + "episode_id": null, + "camera": "cam_high", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_089_episode_52_378_cam_high.jpg", + "images/task_089_episode_52_421_cam_high.jpg", + "images/task_089_episode_52_463_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_52", + "recording_id": "task_089_episode_52", + "episode_id": null, + "camera": "cam_high", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Left\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Left" + } + ], + "images": [ + "images/task_089_episode_83_0_cam_high.jpg", + "images/task_089_episode_83_15_cam_high.jpg", + "images/task_089_episode_83_30_cam_high.jpg", + "images/task_089_episode_83_45_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_83", + "recording_id": "task_089_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Left\nC. Down\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Left" + } + ], + "images": [ + "images/task_089_episode_83_6_cam_high.jpg", + "images/task_089_episode_83_21_cam_high.jpg", + "images/task_089_episode_83_36_cam_high.jpg", + "images/task_089_episode_83_51_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_83", + "recording_id": "task_089_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Down\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Left" + } + ], + "images": [ + "images/task_089_episode_83_12_cam_high.jpg", + "images/task_089_episode_83_27_cam_high.jpg", + "images/task_089_episode_83_42_cam_high.jpg", + "images/task_089_episode_83_57_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_83", + "recording_id": "task_089_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Down\nC. Left\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Right" + } + ], + "images": [ + "images/task_089_episode_83_180_cam_high.jpg", + "images/task_089_episode_83_195_cam_high.jpg", + "images/task_089_episode_83_210_cam_high.jpg", + "images/task_089_episode_83_225_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_83", + "recording_id": "task_089_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Left\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Right" + } + ], + "images": [ + "images/task_089_episode_83_186_cam_high.jpg", + "images/task_089_episode_83_201_cam_high.jpg", + "images/task_089_episode_83_216_cam_high.jpg", + "images/task_089_episode_83_231_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_83", + "recording_id": "task_089_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Left\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Right" + } + ], + "images": [ + "images/task_089_episode_83_192_cam_high.jpg", + "images/task_089_episode_83_207_cam_high.jpg", + "images/task_089_episode_83_222_cam_high.jpg", + "images/task_089_episode_83_237_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_83", + "recording_id": "task_089_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Right\nC. Down\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Down" + } + ], + "images": [ + "images/task_089_episode_83_222_cam_high.jpg", + "images/task_089_episode_83_237_cam_high.jpg", + "images/task_089_episode_83_252_cam_high.jpg", + "images/task_089_episode_83_267_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_83", + "recording_id": "task_089_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Up\nC. Left\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Down" + } + ], + "images": [ + "images/task_089_episode_83_228_cam_high.jpg", + "images/task_089_episode_83_243_cam_high.jpg", + "images/task_089_episode_83_258_cam_high.jpg", + "images/task_089_episode_83_273_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_83", + "recording_id": "task_089_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Left\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Down" + } + ], + "images": [ + "images/task_089_episode_83_234_cam_high.jpg", + "images/task_089_episode_83_249_cam_high.jpg", + "images/task_089_episode_83_264_cam_high.jpg", + "images/task_089_episode_83_279_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_83", + "recording_id": "task_089_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Right\nC. Left\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Up" + } + ], + "images": [ + "images/task_089_episode_83_264_cam_high.jpg", + "images/task_089_episode_83_279_cam_high.jpg", + "images/task_089_episode_83_294_cam_high.jpg", + "images/task_089_episode_83_309_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_83", + "recording_id": "task_089_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Down\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Up" + } + ], + "images": [ + "images/task_089_episode_83_270_cam_high.jpg", + "images/task_089_episode_83_285_cam_high.jpg", + "images/task_089_episode_83_300_cam_high.jpg", + "images/task_089_episode_83_315_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_83", + "recording_id": "task_089_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Left\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Up" + } + ], + "images": [ + "images/task_089_episode_83_276_cam_high.jpg", + "images/task_089_episode_83_291_cam_high.jpg", + "images/task_089_episode_83_306_cam_high.jpg", + "images/task_089_episode_83_321_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_83", + "recording_id": "task_089_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Left\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Right" + } + ], + "images": [ + "images/task_089_episode_83_324_cam_high.jpg", + "images/task_089_episode_83_339_cam_high.jpg", + "images/task_089_episode_83_354_cam_high.jpg", + "images/task_089_episode_83_369_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_83", + "recording_id": "task_089_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Down\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Right" + } + ], + "images": [ + "images/task_089_episode_83_330_cam_high.jpg", + "images/task_089_episode_83_345_cam_high.jpg", + "images/task_089_episode_83_360_cam_high.jpg", + "images/task_089_episode_83_375_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_83", + "recording_id": "task_089_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Right\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Right" + } + ], + "images": [ + "images/task_089_episode_83_336_cam_high.jpg", + "images/task_089_episode_83_351_cam_high.jpg", + "images/task_089_episode_83_366_cam_high.jpg", + "images/task_089_episode_83_381_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_83", + "recording_id": "task_089_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Left\nC. Down\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Up" + } + ], + "images": [ + "images/task_089_episode_83_366_cam_high.jpg", + "images/task_089_episode_83_381_cam_high.jpg", + "images/task_089_episode_83_396_cam_high.jpg", + "images/task_089_episode_83_411_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_83", + "recording_id": "task_089_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Left\nC. Down\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Up" + } + ], + "images": [ + "images/task_089_episode_83_372_cam_high.jpg", + "images/task_089_episode_83_387_cam_high.jpg", + "images/task_089_episode_83_402_cam_high.jpg", + "images/task_089_episode_83_417_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_83", + "recording_id": "task_089_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Left\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Up" + } + ], + "images": [ + "images/task_089_episode_83_378_cam_high.jpg", + "images/task_089_episode_83_393_cam_high.jpg", + "images/task_089_episode_83_408_cam_high.jpg", + "images/task_089_episode_83_423_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_83", + "recording_id": "task_089_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Up\nC. Right\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Down" + } + ], + "images": [ + "images/task_089_episode_83_414_cam_high.jpg", + "images/task_089_episode_83_429_cam_high.jpg", + "images/task_089_episode_83_444_cam_high.jpg", + "images/task_089_episode_83_459_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_83", + "recording_id": "task_089_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Right\nC. Left\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Down" + } + ], + "images": [ + "images/task_089_episode_83_420_cam_high.jpg", + "images/task_089_episode_83_435_cam_high.jpg", + "images/task_089_episode_83_450_cam_high.jpg", + "images/task_089_episode_83_465_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_83", + "recording_id": "task_089_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Down\nC. Up\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Down" + } + ], + "images": [ + "images/task_089_episode_83_426_cam_high.jpg", + "images/task_089_episode_83_441_cam_high.jpg", + "images/task_089_episode_83_456_cam_high.jpg", + "images/task_089_episode_83_471_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_83", + "recording_id": "task_089_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Only the right arm is actively moving" + } + ], + "images": [ + "images/task_089_episode_83_239_cam_high.jpg", + "images/task_089_episode_83_254_cam_high.jpg", + "images/task_089_episode_83_269_cam_high.jpg", + "images/task_089_episode_83_284_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_83", + "recording_id": "task_089_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_089_episode_83_635_cam_high.jpg", + "images/task_089_episode_83_650_cam_high.jpg", + "images/task_089_episode_83_665_cam_high.jpg", + "images/task_089_episode_83_680_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_83", + "recording_id": "task_089_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_089_episode_83_125_cam_high.jpg", + "images/task_089_episode_83_140_cam_high.jpg", + "images/task_089_episode_83_155_cam_high.jpg", + "images/task_089_episode_83_170_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_83", + "recording_id": "task_089_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_089_episode_83_489_cam_high.jpg", + "images/task_089_episode_83_504_cam_high.jpg", + "images/task_089_episode_83_519_cam_high.jpg", + "images/task_089_episode_83_534_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_83", + "recording_id": "task_089_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Only the right arm is actively moving" + } + ], + "images": [ + "images/task_089_episode_83_260_cam_high.jpg", + "images/task_089_episode_83_275_cam_high.jpg", + "images/task_089_episode_83_290_cam_high.jpg", + "images/task_089_episode_83_305_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_83", + "recording_id": "task_089_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Only the right arm is actively moving" + } + ], + "images": [ + "images/task_089_episode_83_208_cam_high.jpg", + "images/task_089_episode_83_223_cam_high.jpg", + "images/task_089_episode_83_238_cam_high.jpg", + "images/task_089_episode_83_253_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_83", + "recording_id": "task_089_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Both arms are actively moving" + } + ], + "images": [ + "images/task_089_episode_83_21_cam_high.jpg", + "images/task_089_episode_83_36_cam_high.jpg", + "images/task_089_episode_83_51_cam_high.jpg", + "images/task_089_episode_83_66_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_83", + "recording_id": "task_089_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Both arms are actively moving" + } + ], + "images": [ + "images/task_089_episode_83_73_cam_high.jpg", + "images/task_089_episode_83_88_cam_high.jpg", + "images/task_089_episode_83_103_cam_high.jpg", + "images/task_089_episode_83_118_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_83", + "recording_id": "task_089_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Only the left arm is actively moving" + } + ], + "images": [ + "images/task_089_episode_83_396_cam_high.jpg", + "images/task_089_episode_83_411_cam_high.jpg", + "images/task_089_episode_83_426_cam_high.jpg", + "images/task_089_episode_83_441_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_83", + "recording_id": "task_089_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Only the left arm is actively moving" + } + ], + "images": [ + "images/task_089_episode_83_468_cam_high.jpg", + "images/task_089_episode_83_483_cam_high.jpg", + "images/task_089_episode_83_498_cam_high.jpg", + "images/task_089_episode_83_513_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_83", + "recording_id": "task_089_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Both arms are actively moving" + } + ], + "images": [ + "images/task_089_episode_83_94_cam_high.jpg", + "images/task_089_episode_83_109_cam_high.jpg", + "images/task_089_episode_83_124_cam_high.jpg", + "images/task_089_episode_83_139_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_83", + "recording_id": "task_089_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_089_episode_83_286_cam_high.jpg", + "images/task_089_episode_83_289_cam_high.jpg", + "images/task_089_episode_83_292_cam_high.jpg", + "images/task_089_episode_83_295_cam_high.jpg", + "images/task_089_episode_83_298_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_83", + "recording_id": "task_089_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_089_episode_83_772_cam_high.jpg", + "images/task_089_episode_83_775_cam_high.jpg", + "images/task_089_episode_83_778_cam_high.jpg", + "images/task_089_episode_83_781_cam_high.jpg", + "images/task_089_episode_83_784_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_83", + "recording_id": "task_089_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_089_episode_83_90_cam_high.jpg", + "images/task_089_episode_83_93_cam_high.jpg", + "images/task_089_episode_83_96_cam_high.jpg", + "images/task_089_episode_83_99_cam_high.jpg", + "images/task_089_episode_83_102_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_83", + "recording_id": "task_089_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_089_episode_83_379_cam_high.jpg", + "images/task_089_episode_83_382_cam_high.jpg", + "images/task_089_episode_83_385_cam_high.jpg", + "images/task_089_episode_83_388_cam_high.jpg", + "images/task_089_episode_83_391_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_83", + "recording_id": "task_089_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_089_episode_83_109_cam_high.jpg", + "images/task_089_episode_83_112_cam_high.jpg", + "images/task_089_episode_83_115_cam_high.jpg", + "images/task_089_episode_83_118_cam_high.jpg", + "images/task_089_episode_83_121_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_83", + "recording_id": "task_089_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_089_episode_83_15_cam_high.jpg", + "images/task_089_episode_83_18_cam_high.jpg", + "images/task_089_episode_83_21_cam_high.jpg", + "images/task_089_episode_83_24_cam_high.jpg", + "images/task_089_episode_83_27_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_83", + "recording_id": "task_089_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_089_episode_83_389_cam_high.jpg", + "images/task_089_episode_83_392_cam_high.jpg", + "images/task_089_episode_83_395_cam_high.jpg", + "images/task_089_episode_83_398_cam_high.jpg", + "images/task_089_episode_83_401_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_83", + "recording_id": "task_089_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_089_episode_83_753_cam_high.jpg", + "images/task_089_episode_83_756_cam_high.jpg", + "images/task_089_episode_83_759_cam_high.jpg", + "images/task_089_episode_83_762_cam_high.jpg", + "images/task_089_episode_83_765_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_83", + "recording_id": "task_089_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Frame X happens before Frame Y" + } + ], + "images": [ + "images/task_089_episode_83_144_cam_high.jpg", + "images/task_089_episode_83_575_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_83", + "recording_id": "task_089_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Frame X happens before Frame Y" + } + ], + "images": [ + "images/task_089_episode_83_200_cam_high.jpg", + "images/task_089_episode_83_495_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_83", + "recording_id": "task_089_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Frame Y happens before Frame X" + } + ], + "images": [ + "images/task_089_episode_83_655_cam_high.jpg", + "images/task_089_episode_83_280_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_83", + "recording_id": "task_089_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_089_episode_83_674_cam_high.jpg", + "images/task_089_episode_83_717_cam_high.jpg", + "images/task_089_episode_83_759_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_83", + "recording_id": "task_089_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_089_episode_83_463_cam_high.jpg", + "images/task_089_episode_83_505_cam_high.jpg", + "images/task_089_episode_83_548_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_83", + "recording_id": "task_089_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_089_episode_83_590_cam_high.jpg", + "images/task_089_episode_83_674_cam_high.jpg", + "images/task_089_episode_83_632_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_83", + "recording_id": "task_089_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_089_episode_83_378_cam_high.jpg", + "images/task_089_episode_83_421_cam_high.jpg", + "images/task_089_episode_83_336_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_83", + "recording_id": "task_089_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_089_episode_83_590_cam_high.jpg", + "images/task_089_episode_83_548_cam_high.jpg", + "images/task_089_episode_83_505_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_83", + "recording_id": "task_089_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_089_episode_83_251_cam_high.jpg", + "images/task_089_episode_83_336_cam_high.jpg", + "images/task_089_episode_83_294_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_83", + "recording_id": "task_089_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_089_episode_83_590_cam_high.jpg", + "images/task_089_episode_83_632_cam_high.jpg", + "images/task_089_episode_83_548_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_83", + "recording_id": "task_089_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_089_episode_83_167_cam_high.jpg", + "images/task_089_episode_83_251_cam_high.jpg", + "images/task_089_episode_83_209_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_83", + "recording_id": "task_089_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_089_episode_83_82_cam_high.jpg", + "images/task_089_episode_83_125_cam_high.jpg", + "images/task_089_episode_83_167_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_83", + "recording_id": "task_089_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_089_episode_83_40_cam_high.jpg", + "images/task_089_episode_83_125_cam_high.jpg", + "images/task_089_episode_83_82_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_83", + "recording_id": "task_089_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_089_episode_83_294_cam_high.jpg", + "images/task_089_episode_83_378_cam_high.jpg", + "images/task_089_episode_83_336_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_83", + "recording_id": "task_089_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_089_episode_83_632_cam_high.jpg", + "images/task_089_episode_83_674_cam_high.jpg", + "images/task_089_episode_83_717_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_83", + "recording_id": "task_089_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_089_episode_83_463_cam_high.jpg", + "images/task_089_episode_83_421_cam_high.jpg", + "images/task_089_episode_83_505_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_83", + "recording_id": "task_089_episode_83", + "episode_id": null, + "camera": "cam_high", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Right" + } + ], + "images": [ + "images/task_089_episode_99_0_cam_high.jpg", + "images/task_089_episode_99_15_cam_high.jpg", + "images/task_089_episode_99_30_cam_high.jpg", + "images/task_089_episode_99_45_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_99", + "recording_id": "task_089_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Down\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Right" + } + ], + "images": [ + "images/task_089_episode_99_0_cam_high.jpg", + "images/task_089_episode_99_15_cam_high.jpg", + "images/task_089_episode_99_30_cam_high.jpg", + "images/task_089_episode_99_45_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_99", + "recording_id": "task_089_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Up\nB. Left\nC. Down\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Right" + } + ], + "images": [ + "images/task_089_episode_99_6_cam_high.jpg", + "images/task_089_episode_99_21_cam_high.jpg", + "images/task_089_episode_99_36_cam_high.jpg", + "images/task_089_episode_99_51_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_99", + "recording_id": "task_089_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Up\nC. Left\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Right" + } + ], + "images": [ + "images/task_089_episode_99_12_cam_high.jpg", + "images/task_089_episode_99_27_cam_high.jpg", + "images/task_089_episode_99_42_cam_high.jpg", + "images/task_089_episode_99_57_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_99", + "recording_id": "task_089_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Left\nC. Down\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Left" + } + ], + "images": [ + "images/task_089_episode_99_12_cam_high.jpg", + "images/task_089_episode_99_27_cam_high.jpg", + "images/task_089_episode_99_42_cam_high.jpg", + "images/task_089_episode_99_57_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_99", + "recording_id": "task_089_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Left\nC. Down\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Left" + } + ], + "images": [ + "images/task_089_episode_99_18_cam_high.jpg", + "images/task_089_episode_99_33_cam_high.jpg", + "images/task_089_episode_99_48_cam_high.jpg", + "images/task_089_episode_99_63_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_99", + "recording_id": "task_089_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Right\nC. Left\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Left" + } + ], + "images": [ + "images/task_089_episode_99_24_cam_high.jpg", + "images/task_089_episode_99_39_cam_high.jpg", + "images/task_089_episode_99_54_cam_high.jpg", + "images/task_089_episode_99_69_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_99", + "recording_id": "task_089_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Up\nC. Left\nD. Right\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Right" + } + ], + "images": [ + "images/task_089_episode_99_198_cam_high.jpg", + "images/task_089_episode_99_213_cam_high.jpg", + "images/task_089_episode_99_228_cam_high.jpg", + "images/task_089_episode_99_243_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_99", + "recording_id": "task_089_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Up\nC. Down\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Right" + } + ], + "images": [ + "images/task_089_episode_99_204_cam_high.jpg", + "images/task_089_episode_99_219_cam_high.jpg", + "images/task_089_episode_99_234_cam_high.jpg", + "images/task_089_episode_99_249_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_99", + "recording_id": "task_089_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Down" + } + ], + "images": [ + "images/task_089_episode_99_240_cam_high.jpg", + "images/task_089_episode_99_255_cam_high.jpg", + "images/task_089_episode_99_270_cam_high.jpg", + "images/task_089_episode_99_285_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_99", + "recording_id": "task_089_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Down\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Down" + } + ], + "images": [ + "images/task_089_episode_99_246_cam_high.jpg", + "images/task_089_episode_99_261_cam_high.jpg", + "images/task_089_episode_99_276_cam_high.jpg", + "images/task_089_episode_99_291_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_99", + "recording_id": "task_089_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Up\nC. Down\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Down" + } + ], + "images": [ + "images/task_089_episode_99_252_cam_high.jpg", + "images/task_089_episode_99_267_cam_high.jpg", + "images/task_089_episode_99_282_cam_high.jpg", + "images/task_089_episode_99_297_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_99", + "recording_id": "task_089_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Up" + } + ], + "images": [ + "images/task_089_episode_99_282_cam_high.jpg", + "images/task_089_episode_99_297_cam_high.jpg", + "images/task_089_episode_99_312_cam_high.jpg", + "images/task_089_episode_99_327_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_99", + "recording_id": "task_089_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Right\nC. Left\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Up" + } + ], + "images": [ + "images/task_089_episode_99_288_cam_high.jpg", + "images/task_089_episode_99_303_cam_high.jpg", + "images/task_089_episode_99_318_cam_high.jpg", + "images/task_089_episode_99_333_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_99", + "recording_id": "task_089_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the right robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Left\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Up" + } + ], + "images": [ + "images/task_089_episode_99_294_cam_high.jpg", + "images/task_089_episode_99_309_cam_high.jpg", + "images/task_089_episode_99_324_cam_high.jpg", + "images/task_089_episode_99_339_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_99", + "recording_id": "task_089_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Right\nC. Up\nD. Down\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Up" + } + ], + "images": [ + "images/task_089_episode_99_390_cam_high.jpg", + "images/task_089_episode_99_405_cam_high.jpg", + "images/task_089_episode_99_420_cam_high.jpg", + "images/task_089_episode_99_435_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_99", + "recording_id": "task_089_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Up\nC. Down\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Up" + } + ], + "images": [ + "images/task_089_episode_99_396_cam_high.jpg", + "images/task_089_episode_99_411_cam_high.jpg", + "images/task_089_episode_99_426_cam_high.jpg", + "images/task_089_episode_99_441_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_99", + "recording_id": "task_089_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Left\nB. Down\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Up" + } + ], + "images": [ + "images/task_089_episode_99_402_cam_high.jpg", + "images/task_089_episode_99_417_cam_high.jpg", + "images/task_089_episode_99_432_cam_high.jpg", + "images/task_089_episode_99_447_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_99", + "recording_id": "task_089_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Down\nC. Up\nD. Left\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Down" + } + ], + "images": [ + "images/task_089_episode_99_438_cam_high.jpg", + "images/task_089_episode_99_453_cam_high.jpg", + "images/task_089_episode_99_468_cam_high.jpg", + "images/task_089_episode_99_483_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_99", + "recording_id": "task_089_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Right\nB. Left\nC. Down\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Down" + } + ], + "images": [ + "images/task_089_episode_99_444_cam_high.jpg", + "images/task_089_episode_99_459_cam_high.jpg", + "images/task_089_episode_99_474_cam_high.jpg", + "images/task_089_episode_99_489_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_99", + "recording_id": "task_089_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nIn which direction does the left robot arm primarily move across these time-ordered frames?\nOptions:\nA. Down\nB. Left\nC. Right\nD. Up\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Down" + } + ], + "images": [ + "images/task_089_episode_99_450_cam_high.jpg", + "images/task_089_episode_99_465_cam_high.jpg", + "images/task_089_episode_99_480_cam_high.jpg", + "images/task_089_episode_99_495_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T3", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_99", + "recording_id": "task_089_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_089_episode_99_500_cam_high.jpg", + "images/task_089_episode_99_515_cam_high.jpg", + "images/task_089_episode_99_530_cam_high.jpg", + "images/task_089_episode_99_545_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_99", + "recording_id": "task_089_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_089_episode_99_562_cam_high.jpg", + "images/task_089_episode_99_577_cam_high.jpg", + "images/task_089_episode_99_592_cam_high.jpg", + "images/task_089_episode_99_607_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_99", + "recording_id": "task_089_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "D. 
Both arms are idle or barely moving" + } + ], + "images": [ + "images/task_089_episode_99_739_cam_high.jpg", + "images/task_089_episode_99_754_cam_high.jpg", + "images/task_089_episode_99_769_cam_high.jpg", + "images/task_089_episode_99_784_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_99", + "recording_id": "task_089_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "D" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Only the right arm is actively moving" + } + ], + "images": [ + "images/task_089_episode_99_302_cam_high.jpg", + "images/task_089_episode_99_317_cam_high.jpg", + "images/task_089_episode_99_332_cam_high.jpg", + "images/task_089_episode_99_347_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_99", + "recording_id": "task_089_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Both arms are actively moving" + } + ], + "images": [ + "images/task_089_episode_99_94_cam_high.jpg", + "images/task_089_episode_99_109_cam_high.jpg", + "images/task_089_episode_99_124_cam_high.jpg", + "images/task_089_episode_99_139_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_99", + "recording_id": "task_089_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Both arms are actively moving" + } + ], + "images": [ + "images/task_089_episode_99_21_cam_high.jpg", + "images/task_089_episode_99_36_cam_high.jpg", + "images/task_089_episode_99_51_cam_high.jpg", + "images/task_089_episode_99_66_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_99", + "recording_id": "task_089_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Only the left arm is actively moving" + } + ], + "images": [ + "images/task_089_episode_99_406_cam_high.jpg", + "images/task_089_episode_99_421_cam_high.jpg", + "images/task_089_episode_99_436_cam_high.jpg", + "images/task_089_episode_99_451_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_99", + "recording_id": "task_089_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. Only the right arm is actively moving" + } + ], + "images": [ + "images/task_089_episode_99_312_cam_high.jpg", + "images/task_089_episode_99_327_cam_high.jpg", + "images/task_089_episode_99_342_cam_high.jpg", + "images/task_089_episode_99_357_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_99", + "recording_id": "task_089_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "C. 
Only the right arm is actively moving" + } + ], + "images": [ + "images/task_089_episode_99_229_cam_high.jpg", + "images/task_089_episode_99_244_cam_high.jpg", + "images/task_089_episode_99_259_cam_high.jpg", + "images/task_089_episode_99_274_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_99", + "recording_id": "task_089_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "C" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Only the left arm is actively moving" + } + ], + "images": [ + "images/task_089_episode_99_635_cam_high.jpg", + "images/task_089_episode_99_650_cam_high.jpg", + "images/task_089_episode_99_665_cam_high.jpg", + "images/task_089_episode_99_680_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_99", + "recording_id": "task_089_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Both arms are actively moving" + } + ], + "images": [ + "images/task_089_episode_99_83_cam_high.jpg", + "images/task_089_episode_99_98_cam_high.jpg", + "images/task_089_episode_99_113_cam_high.jpg", + "images/task_089_episode_99_128_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_99", + "recording_id": "task_089_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 4\nAcross these time-ordered frames, which bimanual activity state best describes the robot?\nOptions:\nA. Both arms are actively moving\nB. Only the left arm is actively moving\nC. Only the right arm is actively moving\nD. Both arms are idle or barely moving\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Only the left arm is actively moving" + } + ], + "images": [ + "images/task_089_episode_99_385_cam_high.jpg", + "images/task_089_episode_99_400_cam_high.jpg", + "images/task_089_episode_99_415_cam_high.jpg", + "images/task_089_episode_99_430_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T4", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_99", + "recording_id": "task_089_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_089_episode_99_781_cam_high.jpg", + "images/task_089_episode_99_784_cam_high.jpg", + "images/task_089_episode_99_787_cam_high.jpg", + "images/task_089_episode_99_790_cam_high.jpg", + "images/task_089_episode_99_793_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_99", + "recording_id": "task_089_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_089_episode_99_716_cam_high.jpg", + "images/task_089_episode_99_719_cam_high.jpg", + "images/task_089_episode_99_722_cam_high.jpg", + "images/task_089_episode_99_725_cam_high.jpg", + "images/task_089_episode_99_728_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_99", + "recording_id": "task_089_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_089_episode_99_398_cam_high.jpg", + "images/task_089_episode_99_401_cam_high.jpg", + "images/task_089_episode_99_404_cam_high.jpg", + "images/task_089_episode_99_407_cam_high.jpg", + "images/task_089_episode_99_410_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_99", + "recording_id": "task_089_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the left robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_089_episode_99_520_cam_high.jpg", + "images/task_089_episode_99_523_cam_high.jpg", + "images/task_089_episode_99_526_cam_high.jpg", + "images/task_089_episode_99_529_cam_high.jpg", + "images/task_089_episode_99_532_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_99", + "recording_id": "task_089_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_089_episode_99_398_cam_high.jpg", + "images/task_089_episode_99_401_cam_high.jpg", + "images/task_089_episode_99_404_cam_high.jpg", + "images/task_089_episode_99_407_cam_high.jpg", + "images/task_089_episode_99_410_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_99", + "recording_id": "task_089_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. Stationary (the queried arm is still or barely moving)" + } + ], + "images": [ + "images/task_089_episode_99_370_cam_high.jpg", + "images/task_089_episode_99_373_cam_high.jpg", + "images/task_089_episode_99_376_cam_high.jpg", + "images/task_089_episode_99_379_cam_high.jpg", + "images/task_089_episode_99_382_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_99", + "recording_id": "task_089_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_089_episode_99_239_cam_high.jpg", + "images/task_089_episode_99_242_cam_high.jpg", + "images/task_089_episode_99_245_cam_high.jpg", + "images/task_089_episode_99_248_cam_high.jpg", + "images/task_089_episode_99_251_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_99", + "recording_id": "task_089_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 5\nAcross these time-ordered frames, is the right robot arm actively moving or stationary?\nOptions:\nA. Stationary (the queried arm is still or barely moving)\nB. Actively moving (the queried arm is clearly in motion)\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Actively moving (the queried arm is clearly in motion)" + } + ], + "images": [ + "images/task_089_episode_99_71_cam_high.jpg", + "images/task_089_episode_99_74_cam_high.jpg", + "images/task_089_episode_99_77_cam_high.jpg", + "images/task_089_episode_99_80_cam_high.jpg", + "images/task_089_episode_99_83_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T6", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_99", + "recording_id": "task_089_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "A. 
Frame X happens before Frame Y" + } + ], + "images": [ + "images/task_089_episode_99_144_cam_high.jpg", + "images/task_089_episode_99_575_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_99", + "recording_id": "task_089_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "A" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. Frame Y happens before Frame X" + } + ], + "images": [ + "images/task_089_episode_99_495_cam_high.jpg", + "images/task_089_episode_99_200_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_99", + "recording_id": "task_089_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 2\nWhich displayed frame happened earlier in the demonstration?\nOptions:\nA. Frame X happens before Frame Y\nB. Frame Y happens before Frame X\nAnswer with the option label and text." + }, + { + "from": "gpt", + "value": "B. 
Frame Y happens before Frame X" + } + ], + "images": [ + "images/task_089_episode_99_655_cam_high.jpg", + "images/task_089_episode_99_280_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T9", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_99", + "recording_id": "task_089_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "B" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_089_episode_99_632_cam_high.jpg", + "images/task_089_episode_99_674_cam_high.jpg", + "images/task_089_episode_99_717_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_99", + "recording_id": "task_089_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_089_episode_99_463_cam_high.jpg", + "images/task_089_episode_99_505_cam_high.jpg", + "images/task_089_episode_99_548_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_99", + "recording_id": "task_089_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_089_episode_99_759_cam_high.jpg", + "images/task_089_episode_99_674_cam_high.jpg", + "images/task_089_episode_99_717_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_99", + "recording_id": "task_089_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YZX" + } + ], + "images": [ + "images/task_089_episode_99_40_cam_high.jpg", + "images/task_089_episode_99_82_cam_high.jpg", + "images/task_089_episode_99_125_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_99", + "recording_id": "task_089_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "YZX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "XZY" + } + ], + "images": [ + "images/task_089_episode_99_590_cam_high.jpg", + "images/task_089_episode_99_548_cam_high.jpg", + "images/task_089_episode_99_632_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_99", + "recording_id": "task_089_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "XZY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "XYZ" + } + ], + "images": [ + "images/task_089_episode_99_251_cam_high.jpg", + "images/task_089_episode_99_336_cam_high.jpg", + "images/task_089_episode_99_294_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_99", + "recording_id": "task_089_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "XYZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZXY" + } + ], + "images": [ + "images/task_089_episode_99_336_cam_high.jpg", + "images/task_089_episode_99_294_cam_high.jpg", + "images/task_089_episode_99_378_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_99", + "recording_id": "task_089_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "ZXY" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_089_episode_99_209_cam_high.jpg", + "images/task_089_episode_99_294_cam_high.jpg", + "images/task_089_episode_99_251_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_99", + "recording_id": "task_089_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_089_episode_99_378_cam_high.jpg", + "images/task_089_episode_99_336_cam_high.jpg", + "images/task_089_episode_99_421_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_99", + "recording_id": "task_089_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "ZYX" + } + ], + "images": [ + "images/task_089_episode_99_421_cam_high.jpg", + "images/task_089_episode_99_378_cam_high.jpg", + "images/task_089_episode_99_463_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_99", + "recording_id": "task_089_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "ZYX" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." 
+ }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_089_episode_99_167_cam_high.jpg", + "images/task_089_episode_99_251_cam_high.jpg", + "images/task_089_episode_99_209_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_99", + "recording_id": "task_089_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "YXZ" + }, + { + "conversations": [ + { + "from": "human", + "value": "Number of images: 3\nOrder these three frames from earliest to latest in the manipulation sequence.\nAnswer briefly." + }, + { + "from": "gpt", + "value": "YXZ" + } + ], + "images": [ + "images/task_089_episode_99_463_cam_high.jpg", + "images/task_089_episode_99_421_cam_high.jpg", + "images/task_089_episode_99_505_cam_high.jpg" + ], + "dataset": "AIST-Bimanual", + "source_dataset": "AIST", + "task_type": "T_temporal", + "task_id": "task_089", + "split": "eval", + "split_group_id": "task_089_episode_99", + "recording_id": "task_089_episode_99", + "episode_id": null, + "camera": "cam_high", + "answer": "YXZ" + } +] \ No newline at end of file