## Load in Python
import pandas as pd
from pathlib import Path
import cv2

# Load the clip annotations (one row per clip; columns seen below include
# video_path, object_class, container_type, outcome).
csv_path = "annotations.csv"
df = pd.read_csv(csv_path)

# Preview the first rows to sanity-check the schema.
print(df.head())

# Access a single clip's metadata.
row = df.iloc[0]
video_file = Path(row.video_path)
if not video_file.exists():
    # Fail loudly here rather than letting VideoCapture silently yield 0 frames.
    raise FileNotFoundError(f"video not found: {video_file}")

# Decode every frame into memory.
# NOTE(review): fine for short clips; for long videos, process frames one at a
# time instead of accumulating the whole list.
cap = cv2.VideoCapture(str(video_file))
if not cap.isOpened():
    raise IOError(f"could not open video: {video_file}")
try:
    frames = []
    while True:
        ok, frame = cap.read()
        if not ok:  # end of stream (or a decode error) — OpenCV reports both as False
            break
        frames.append(frame)
finally:
    # Always release the capture handle, even if decoding raises.
    cap.release()

print("frames:", len(frames))
print("object_class:", row.object_class)
print("container:", row.container_type)
print("outcome:", row.outcome)
Next steps:

- count clips by `container_type`
- filter outcomes to find failure clusters
- group by persistence to test off-frame behavior
- sample occlusion ranges for tests
Open questions:

- Does grip outcome correlate with container type?
- Do mis-grips cluster near occlusion?
- Does persistence help reduce false resets?
- Can baseline models handle this without spatial fields?