diff --git a/source_code/sam3/assets/veval/toy_gt_and_pred/toy_saco_veval_sav_test_eval_res.json b/source_code/sam3/assets/veval/toy_gt_and_pred/toy_saco_veval_sav_test_eval_res.json new file mode 100644 index 0000000000000000000000000000000000000000..12c0ff7083e3da9052360188df46ebee9cb4d5c0 --- /dev/null +++ b/source_code/sam3/assets/veval/toy_gt_and_pred/toy_saco_veval_sav_test_eval_res.json @@ -0,0 +1 @@ +{"dataset_results": {"video_bbox_mAP_50_95": 0.25926449787835926, "video_mask_mAP_50_95": 0.34191419141914187, "video_bbox_phrase_ap_50_95": 0.1371423984673839, "video_bbox_phrase_ap_50": 0.3000915217572178, "video_bbox_phrase_ap_75": 0.08651579443658651, "video_mask_phrase_ap_50_95": 0.1935287184180603, "video_mask_phrase_ap_50": 0.3513242920930748, "video_mask_phrase_ap_75": 0.18776520509193778, "video_mask_teta": 50.647, "video_mask_loc_a": 50.969, "video_mask_assoc_a": 49.191, "video_mask_cls_a": 51.781, "video_mask_loc_re": 53.646, "video_mask_loc_pr": 59.689, "video_mask_assoc_re": 53.932, "video_mask_assoc_pr": 55.295, "video_mask_cls_re": 69.966, "video_mask_cls_pr": 51.792, "video_bbox_all_phrase_HOTA": 0.45019177261810006, "video_bbox_all_phrase_DetA": 0.3065446597162029, "video_bbox_all_phrase_AssA": 0.6620514646913795, "video_bbox_all_phrase_DetRe": 0.34337909467405614, "video_bbox_all_phrase_DetPr": 0.672412162481676, "video_bbox_all_phrase_AssRe": 0.7418235522588219, "video_bbox_all_phrase_AssPr": 0.7588797497822176, "video_bbox_all_phrase_LocA": 0.8264969431924951, "video_bbox_all_phrase_OWTA": 0.4767458776352726, "video_mask_all_phrase_HOTA": 0.4656079602771566, "video_mask_all_phrase_DetA": 0.31732796518831874, "video_mask_all_phrase_AssA": 0.6843555199075559, "video_mask_all_phrase_DetRe": 0.35291773825479045, "video_mask_all_phrase_DetPr": 0.6910909348843808, "video_mask_all_phrase_AssRe": 0.7573349011890161, "video_mask_all_phrase_AssPr": 0.7716315286320553, "video_mask_all_phrase_LocA": 0.8328852506118952, "video_mask_all_phrase_OWTA": 
0.4913272309590218, "video_bbox_demo_precision_50_95": 0.3349983250083749, "video_bbox_demo_recall_50_95": 0.20302968778882485, "video_bbox_demo_f1_50_95": 0.2527916520815702, "video_bbox_demo_precision_50": 0.6499967500162499, "video_bbox_demo_recall_50": 0.3939382001872721, "video_bbox_demo_f1_50": 0.49051719921648634, "video_bbox_demo_precision_75": 0.29999850000749995, "video_bbox_demo_recall_75": 0.18181763085566405, "video_bbox_demo_f1_75": 0.2263672578625684, "video_bbox_demo_pmf1_50_95": 0.34412433298399697, "video_bbox_demo_ilmcc_50_95": 0.6546535992794136, "video_bbox_demo_cgf1_50_95": 0.22528223318760104, "video_bbox_demo_pmf1_w0dt_50_95": 0.29496371398628307, "video_bbox_demo_cgf1_w0dt_50_95": 0.19309905701794375, "video_bbox_demo_positive_micro_f1_50_95": 0.2527916520815702, "video_bbox_demo_cgf1_micro_50_95": 0.1654909649029892, "video_bbox_demo_pmf1_50": 0.6499302549834214, "video_bbox_demo_ilmcc_50": 0.6546535992794136, "video_bbox_demo_cgf1_50": 0.42547918070548385, "video_bbox_demo_positive_micro_f1_50": 0.49051719921648634, "video_bbox_demo_cgf1_micro_50": 0.3211188499755299, "video_bbox_demo_pmf1_75": 0.3666121729955099, "video_bbox_demo_ilmcc_75": 0.6546535992794136, "video_bbox_demo_cgf1_75": 0.2400039785911576, "video_bbox_demo_positive_micro_f1_75": 0.2263672578625684, "video_bbox_demo_cgf1_micro_75": 0.14819214011874154, "video_mask_demo_precision_50_95": 0.4049979750101249, "video_mask_demo_recall_50_95": 0.24545380165514646, "video_mask_demo_f1_50_95": 0.30562163642146994, "video_mask_demo_precision_50": 0.6999965000174999, "video_mask_demo_recall_50": 0.4242411386632161, "video_mask_demo_f1_50": 0.5282529055527269, "video_mask_demo_precision_75": 0.39999800000999997, "video_mask_demo_recall_75": 0.24242350780755206, "video_mask_demo_f1_75": 0.3018386687406093, "video_mask_demo_pmf1_50_95": 0.42578319170459134, "video_mask_demo_ilmcc_50_95": 0.6546535992794136, "video_mask_demo_cgf1_50_95": 0.2787404989620873, 
"video_mask_demo_pmf1_w0dt_50_95": 0.3649570214610783, "video_mask_demo_cgf1_w0dt_50_95": 0.23892042768178906, "video_mask_demo_positive_micro_f1_50_95": 0.30562163642146994, "video_mask_demo_cgf1_micro_50_95": 0.2000763043009796, "video_mask_demo_pmf1_50": 0.691595879731724, "video_mask_demo_ilmcc_50": 0.6546535992794136, "video_mask_demo_cgf1_50": 0.4527557319131855, "video_mask_demo_positive_micro_f1_50": 0.5282529055527269, "video_mask_demo_cgf1_micro_50": 0.34582266594990074, "video_mask_demo_pmf1_75": 0.44161046360299766, "video_mask_demo_ilmcc_75": 0.6546535992794136, "video_mask_demo_cgf1_75": 0.2891018794771529, "video_mask_demo_positive_micro_f1_75": 0.3018386687406093, "video_mask_demo_cgf1_micro_75": 0.19759977089274647}, "video_np_results": [{"video_id": 0, "category_id": 847, "bbox_HOTA": 0.5142922923345529, "bbox_DetA": 0.4728133947307452, "bbox_AssA": 0.5594614107579609, "bbox_DetRe": 0.5232999318999904, "bbox_DetPr": 0.6740601503759398, "bbox_AssRe": 0.6250872094372617, "bbox_AssPr": 0.6825128274969564, "bbox_LocA": 0.7656278351108177, "bbox_OWTA": 0.5404796613713161, "mask_HOTA": 0.5531074184495137, "mask_DetA": 0.5015830888530848, "mask_AssA": 0.6104397175171865, "mask_DetRe": 0.5479132211304601, "mask_DetPr": 0.7057644110275689, "mask_AssRe": 0.6703397116425474, "mask_AssPr": 0.7231025840584305, "mask_LocA": 0.7775805526027232, "mask_OWTA": 0.5779237956098605, "bbox_TP_50_95": 0.8, "bbox_FP_50_95": 3.2, "bbox_FN_50_95": 3.2, "bbox_F1_50_95": 0.2, "bbox_TP_50": 2.0, "bbox_FP_50": 2.0, "bbox_FN_50": 2.0, "bbox_F1_50": 0.5, "bbox_TP_75": 0.0, "bbox_FP_75": 4.0, "bbox_FN_75": 4.0, "bbox_F1_75": 0.0, "mask_TP_50_95": 1.4, "mask_FP_50_95": 2.6, "mask_FN_50_95": 2.6, "mask_F1_50_95": 0.35, "mask_TP_50": 3.0, "mask_FP_50": 1.0, "mask_FN_50": 1.0, "mask_F1_50": 0.75, "mask_TP_75": 0.0, "mask_FP_75": 4.0, "mask_FN_75": 4.0, "mask_F1_75": 0.0}, {"video_id": 0, "category_id": 1390, "bbox_HOTA": 0.7406733177321637, "bbox_DetA": 0.7406733177321637, 
"bbox_AssA": 0.7406733177321637, "bbox_DetRe": 0.8079854809437387, "bbox_DetPr": 0.8428625520636122, "bbox_AssRe": 0.8079854809437387, "bbox_AssPr": 0.8428625520636122, "bbox_LocA": 0.8823614751992302, "bbox_OWTA": 0.7727325543244231, "mask_HOTA": 0.8154208948182305, "mask_DetA": 0.8154208948182305, "mask_AssA": 0.8154208948182304, "mask_DetRe": 0.8508166969147005, "mask_DetPr": 0.8875425975009466, "mask_AssRe": 0.8508166969147005, "mask_AssPr": 0.8875425975009466, "mask_LocA": 0.8885850303003239, "mask_OWTA": 0.8323023646534773, "bbox_TP_50_95": 0.6, "bbox_FP_50_95": 0.4, "bbox_FN_50_95": 0.4, "bbox_F1_50_95": 0.6, "bbox_TP_50": 1.0, "bbox_FP_50": 0.0, "bbox_FN_50": 0.0, "bbox_F1_50": 1.0, "bbox_TP_75": 1.0, "bbox_FP_75": 0.0, "bbox_FN_75": 0.0, "bbox_F1_75": 1.0, "mask_TP_50_95": 0.8, "mask_FP_50_95": 0.2, "mask_FN_50_95": 0.2, "mask_F1_50_95": 0.8, "mask_TP_50": 1.0, "mask_FP_50": 0.0, "mask_FN_50": 0.0, "mask_F1_50": 1.0, "mask_TP_75": 1.0, "mask_FP_75": 0.0, "mask_FN_75": 0.0, "mask_F1_75": 1.0}, {"video_id": 0, "category_id": 1985, "bbox_HOTA": 0.6033247555682881, "bbox_DetA": 0.582364339356503, "bbox_AssA": 0.6261229142494006, "bbox_DetRe": 0.731940716536352, "bbox_DetPr": 0.6764452113891285, "bbox_AssRe": 0.7595503987929015, "bbox_AssPr": 0.7140399793949845, "bbox_LocA": 0.8359470620317413, "bbox_OWTA": 0.6767785755234768, "mask_HOTA": 0.6327138779062043, "mask_DetA": 0.607379512629533, "mask_AssA": 0.661378852549867, "mask_DetRe": 0.7515462714435757, "mask_DetPr": 0.6945642795513374, "mask_AssRe": 0.7849752407175019, "mask_AssPr": 0.7406062552585059, "mask_LocA": 0.8390964342806022, "mask_OWTA": 0.70480310037791, "bbox_TP_50_95": 1.5, "bbox_FP_50_95": 2.5, "bbox_FN_50_95": 2.5, "bbox_F1_50_95": 0.375, "bbox_TP_50": 4.0, "bbox_FP_50": 0.0, "bbox_FN_50": 0.0, "bbox_F1_50": 1.0, "bbox_TP_75": 1.0, "bbox_FP_75": 3.0, "bbox_FN_75": 3.0, "bbox_F1_75": 0.25, "mask_TP_50_95": 1.7, "mask_FP_50_95": 2.3, "mask_FN_50_95": 2.3, "mask_F1_50_95": 0.425, "mask_TP_50": 
4.0, "mask_FP_50": 0.0, "mask_FN_50": 0.0, "mask_F1_50": 1.0, "mask_TP_75": 1.0, "mask_FP_75": 3.0, "mask_FN_75": 3.0, "mask_F1_75": 0.25}, {"video_id": 0, "category_id": 3802, "bbox_HOTA": 0.47649622572399936, "bbox_DetA": 0.433826727090632, "bbox_AssA": 0.5248504404498464, "bbox_DetRe": 0.6931060044477392, "bbox_DetPr": 0.49540798304486033, "bbox_AssRe": 0.5970635788657445, "bbox_AssPr": 0.6521277350832161, "bbox_LocA": 0.8179341001135264, "bbox_OWTA": 0.6029168695170898, "mask_HOTA": 0.48643902599730804, "mask_DetA": 0.45408850965718345, "mask_AssA": 0.5231400608341074, "mask_DetRe": 0.7197924388435877, "mask_DetPr": 0.5144825150123632, "mask_AssRe": 0.5970712271676157, "mask_AssPr": 0.6408536817832409, "mask_LocA": 0.8335384601792232, "mask_OWTA": 0.613243699873648, "bbox_TP_50_95": 1.2, "bbox_FP_50_95": 4.8, "bbox_FN_50_95": 2.8, "bbox_F1_50_95": 0.24000000000000005, "bbox_TP_50": 2.0, "bbox_FP_50": 4.0, "bbox_FN_50": 2.0, "bbox_F1_50": 0.4, "bbox_TP_75": 1.0, "bbox_FP_75": 5.0, "bbox_FN_75": 3.0, "bbox_F1_75": 0.2, "mask_TP_50_95": 1.4, "mask_FP_50_95": 4.6, "mask_FN_50_95": 2.6, "mask_F1_50_95": 0.28, "mask_TP_50": 2.0, "mask_FP_50": 4.0, "mask_FN_50": 2.0, "mask_F1_50": 0.4, "mask_TP_75": 2.0, "mask_FP_75": 4.0, "mask_FN_75": 2.0, "mask_F1_75": 0.4}, {"video_id": 0, "category_id": 3827, "bbox_HOTA": 0.7782356487108129, "bbox_DetA": 0.7707506578022777, "bbox_AssA": 0.7864050337062197, "bbox_DetRe": 0.8251451746704767, "bbox_DetPr": 0.8309663046505151, "bbox_AssRe": 0.8370220441715713, "bbox_AssPr": 0.8427028310094968, "bbox_LocA": 0.8515357797457908, "bbox_OWTA": 0.8052026625060156, "mask_HOTA": 0.7841937402779418, "mask_DetA": 0.7773711399985175, "mask_AssA": 0.7919510612320402, "mask_DetRe": 0.8283712784588442, "mask_DetPr": 0.8342151675485008, "mask_AssRe": 0.8407743682610512, "mask_AssPr": 0.845985151807301, "mask_LocA": 0.851867149412845, "mask_OWTA": 0.8096925025911031, "bbox_TP_50_95": 2.6, "bbox_FP_50_95": 1.4, "bbox_FN_50_95": 1.4, "bbox_F1_50_95": 
0.65, "bbox_TP_50": 4.0, "bbox_FP_50": 0.0, "bbox_FN_50": 0.0, "bbox_F1_50": 1.0, "bbox_TP_75": 3.0, "bbox_FP_75": 1.0, "bbox_FN_75": 1.0, "bbox_F1_75": 0.75, "mask_TP_50_95": 2.8, "mask_FP_50_95": 1.2, "mask_FN_50_95": 1.2, "mask_F1_50_95": 0.7, "mask_TP_50": 4.0, "mask_FP_50": 0.0, "mask_FN_50": 0.0, "mask_F1_50": 1.0, "mask_TP_75": 4.0, "mask_FP_75": 0.0, "mask_FN_75": 0.0, "mask_F1_75": 1.0}, {"video_id": 0, "category_id": 49272, "bbox_HOTA": 0.0, "bbox_DetA": 0.0, "bbox_AssA": 0.0, "bbox_DetRe": 0.0, "bbox_DetPr": 0.0, "bbox_AssRe": 0.0, "bbox_AssPr": 0.0, "bbox_LocA": 1.0, "bbox_OWTA": 0.0, "mask_HOTA": 0.0, "mask_DetA": 0.0, "mask_AssA": 0.0, "mask_DetRe": 0.0, "mask_DetPr": 0.0, "mask_AssRe": 0.0, "mask_AssPr": 0.0, "mask_LocA": 1.0, "mask_OWTA": 0.0, "bbox_TP_50_95": 0.0, "bbox_FP_50_95": 0.0, "bbox_FN_50_95": 12.0, "bbox_F1_50_95": 0.0, "bbox_TP_50": 0.0, "bbox_FP_50": 0.0, "bbox_FN_50": 12.0, "bbox_F1_50": 0.0, "bbox_TP_75": 0.0, "bbox_FP_75": 0.0, "bbox_FN_75": 12.0, "bbox_F1_75": 0.0, "mask_TP_50_95": 0.0, "mask_FP_50_95": 0.0, "mask_FN_50_95": 12.0, "mask_F1_50_95": 0.0, "mask_TP_50": 0.0, "mask_FP_50": 0.0, "mask_FN_50": 12.0, "mask_F1_50": 0.0, "mask_TP_75": 0.0, "mask_FP_75": 0.0, "mask_FN_75": 12.0, "mask_F1_75": 0.0}, {"video_id": 0, "category_id": 49504, "bbox_HOTA": 0.0, "bbox_DetA": 0.0, "bbox_AssA": 0.0, "bbox_DetRe": 0.0, "bbox_DetPr": 0.0, "bbox_AssRe": 0.0, "bbox_AssPr": 0.0, "bbox_LocA": 1.0, "bbox_OWTA": 0.0, "mask_HOTA": 0.0, "mask_DetA": 0.0, "mask_AssA": 0.0, "mask_DetRe": 0.0, "mask_DetPr": 0.0, "mask_AssRe": 0.0, "mask_AssPr": 0.0, "mask_LocA": 1.0, "mask_OWTA": 0.0, "bbox_TP_50_95": 0.0, "bbox_FP_50_95": 1.0, "bbox_FN_50_95": 4.0, "bbox_F1_50_95": 0.0, "bbox_TP_50": 0.0, "bbox_FP_50": 1.0, "bbox_FN_50": 4.0, "bbox_F1_50": 0.0, "bbox_TP_75": 0.0, "bbox_FP_75": 1.0, "bbox_FN_75": 4.0, "bbox_F1_75": 0.0, "mask_TP_50_95": 0.0, "mask_FP_50_95": 1.0, "mask_FN_50_95": 4.0, "mask_F1_50_95": 0.0, "mask_TP_50": 0.0, "mask_FP_50": 1.0, 
"mask_FN_50": 4.0, "mask_F1_50": 0.0, "mask_TP_75": 0.0, "mask_FP_75": 1.0, "mask_FN_75": 4.0, "mask_F1_75": 0.0}, {"video_id": 0, "category_id": 50554, "bbox_TP_50_95": 0.0, "bbox_FP_50_95": 0.0, "bbox_FN_50_95": 0.0, "bbox_F1_50_95": 1.0, "bbox_TP_50": 0.0, "bbox_FP_50": 0.0, "bbox_FN_50": 0.0, "bbox_F1_50": 1.0, "bbox_TP_75": 0.0, "bbox_FP_75": 0.0, "bbox_FN_75": 0.0, "bbox_F1_75": 1.0, "mask_TP_50_95": 0.0, "mask_FP_50_95": 0.0, "mask_FN_50_95": 0.0, "mask_F1_50_95": 1.0, "mask_TP_50": 0.0, "mask_FP_50": 0.0, "mask_FN_50": 0.0, "mask_F1_50": 1.0, "mask_TP_75": 0.0, "mask_FP_75": 0.0, "mask_FN_75": 0.0, "mask_F1_75": 1.0}]} \ No newline at end of file diff --git a/source_code/sam3/examples/saco_gold_silver_eval_example.ipynb b/source_code/sam3/examples/saco_gold_silver_eval_example.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..c5ae4dae4be2364b2436de3b8478160a01c1dd18 --- /dev/null +++ b/source_code/sam3/examples/saco_gold_silver_eval_example.ipynb @@ -0,0 +1,2214 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "id": "417b89e9", + "metadata": {}, + "outputs": [], + "source": [ + "# Copyright (c) Meta Platforms, Inc. and affiliates." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "0e0d2e74", + "metadata": {}, + "outputs": [], + "source": [ + "import copy\n", + "import json\n", + "import os\n", + "\n", + "import numpy as np\n", + "\n", + "from pycocotools.coco import COCO\n", + "from sam3.eval.cgf1_eval import CGF1Evaluator" + ] + }, + { + "cell_type": "markdown", + "id": "1ceba210-cb61-4998-a153-c13c612e6182", + "metadata": {}, + "source": [ + "# SA-Co/Gold" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "b31ab5d3", + "metadata": {}, + "outputs": [], + "source": [ + "# Update to the directory where the GT annotation and PRED files exist\n", + "GT_DIR = # PUT YOUR PATH HERE\n", + "PRED_DIR = # PUT YOUR PATH HERE" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "25613248", + "metadata": {}, + "outputs": [], + "source": [ + "# Relative file names for GT files for 7 SA-Co/Gold subsets\n", + "saco_gold_gts = {\n", + " # MetaCLIP Captioner\n", + " \"metaclip_nps\": [\n", + " \"gold_metaclip_merged_a_release_test.json\",\n", + " \"gold_metaclip_merged_b_release_test.json\",\n", + " \"gold_metaclip_merged_c_release_test.json\",\n", + " ],\n", + " # SA-1B captioner\n", + " \"sa1b_nps\": [\n", + " \"gold_sa1b_merged_a_release_test.json\",\n", + " \"gold_sa1b_merged_b_release_test.json\",\n", + " \"gold_sa1b_merged_c_release_test.json\",\n", + " ],\n", + " # Crowded\n", + " \"crowded\": [\n", + " \"gold_crowded_merged_a_release_test.json\",\n", + " \"gold_crowded_merged_b_release_test.json\",\n", + " \"gold_crowded_merged_c_release_test.json\",\n", + " ],\n", + " # FG Food\n", + " \"fg_food\": [\n", + " \"gold_fg_food_merged_a_release_test.json\",\n", + " \"gold_fg_food_merged_b_release_test.json\",\n", + " \"gold_fg_food_merged_c_release_test.json\",\n", + " ],\n", + " # FG Sports\n", + " \"fg_sports_equipment\": [\n", + " \"gold_fg_sports_equipment_merged_a_release_test.json\",\n", + " 
\"gold_fg_sports_equipment_merged_b_release_test.json\",\n", + " \"gold_fg_sports_equipment_merged_c_release_test.json\",\n", + " ],\n", + " # Attributes\n", + " \"attributes\": [\n", + " \"gold_attributes_merged_a_release_test.json\",\n", + " \"gold_attributes_merged_b_release_test.json\",\n", + " \"gold_attributes_merged_c_release_test.json\",\n", + " ],\n", + " # Wiki common\n", + " \"wiki_common\": [\n", + " \"gold_wiki_common_merged_a_release_test.json\",\n", + " \"gold_wiki_common_merged_b_release_test.json\",\n", + " \"gold_wiki_common_merged_c_release_test.json\",\n", + " ],\n", + "}" + ] + }, + { + "cell_type": "markdown", + "id": "2703e989", + "metadata": {}, + "source": [ + "## Run offline evaluation for all 7 SA-Co/Gold subsets" + ] + }, + { + "cell_type": "markdown", + "id": "0314ddca-46e7-47fd-9f66-346c4f8baf96", + "metadata": {}, + "source": [ + "We assume the inference has already been run for all 7 datasets. With the default configurations, the predictions are dumped in a predictable folder" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "cc28d29f", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Processing subset: metaclip_nps\n", + "loading annotations into memory...\n", + "Done (t=0.28s)\n", + "creating index...\n", + "index created!\n", + "loading annotations into memory...\n", + "Done (t=0.26s)\n", + "creating index...\n", + "index created!\n", + "loading annotations into memory...\n", + "Done (t=0.27s)\n", + "creating index...\n", + "index created!\n", + "Loaded 26221 predictions\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + 
"100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 33057/33057 [00:10<00:00, 3171.54it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Accumulating results\n", + "cgF1 metric, IoU type=segm\n", + " Average cgF1 @[ IoU=0.50:0.95] = 0.473\n", + " Average precision @[ IoU=0.50:0.95] = 0.609\n", + " Average recall @[ IoU=0.50:0.95] = 0.532\n", + " Average F1 @[ IoU=0.50:0.95] = 0.568\n", + " Average positive_macro_F1 @[ IoU=0.50:0.95] = 0.759\n", + " Average positive_micro_F1 @[ IoU=0.50:0.95] = 0.586\n", + " Average positive_micro_precision @[ IoU=0.50:0.95] = 0.652\n", + " Average IL_precision = 0.916\n", + " Average IL_recall = 0.760\n", + " Average IL_F1 = 0.830\n", + " Average IL_FPR = 0.013\n", + " Average IL_MCC = 0.807\n", + " Average cgF1 @[ IoU=0.50 ] = 0.568\n", + " Average precision @[ IoU=0.50 ] = 0.732\n", + " Average recall @[ IoU=0.50 ] = 0.639\n", + " Average F1 @[ IoU=0.50 ] = 0.682\n", + " Average positive_macro_F1 @[ IoU=0.50 ] = 0.872\n", + " Average positive_micro_F1 @[ IoU=0.50 ] = 0.704\n", + " Average positive_micro_precision @[ IoU=0.50 ] = 0.783\n", + " Average cgF1 @[ IoU=0.75 ] = 0.515\n", + " Average precision @[ IoU=0.75 ] = 0.664\n", + " Average recall @[ IoU=0.75 ] = 0.580\n", + " Average F1 @[ IoU=0.75 ] = 0.619\n", + " Average positive_macro_F1 @[ IoU=0.75 ] = 0.815\n", + " Average positive_micro_F1 @[ IoU=0.75 ] = 0.638\n", + " Average positive_micro_precision @[ IoU=0.75 ] = 0.710\n", + "\n", + "{'cgF1_eval_segm_cgF1': 0.47258639187690543, 'cgF1_eval_segm_precision': 0.6094191949336503, 'cgF1_eval_segm_recall': 0.5321857924434281, 'cgF1_eval_segm_F1': 0.5681401781600861, 'cgF1_eval_segm_positive_macro_F1': 
0.7594764589657407, 'cgF1_eval_segm_positive_micro_F1': 0.5858321816359353, 'cgF1_eval_segm_positive_micro_precision': 0.6516289867940714, 'cgF1_eval_segm_IL_precision': 0.9157483928108933, 'cgF1_eval_segm_IL_recall': 0.7596648256028061, 'cgF1_eval_segm_IL_F1': 0.8304356444812764, 'cgF1_eval_segm_IL_FPR': 0.013198590231849291, 'cgF1_eval_segm_IL_MCC': 0.8066924397311339, 'cgF1_eval_segm_cgF1@0.5': 0.5675504156248331, 'cgF1_eval_segm_precision@0.5': 0.7318687877530184, 'cgF1_eval_segm_recall@0.5': 0.6391170051959989, 'cgF1_eval_segm_F1@0.5': 0.6823056445199827, 'cgF1_eval_segm_positive_macro_F1@0.5': 0.8723414712702943, 'cgF1_eval_segm_positive_micro_F1@0.5': 0.7035524168467409, 'cgF1_eval_segm_positive_micro_precision@0.5': 0.7825597234127607, 'cgF1_eval_segm_cgF1@0.75': 0.5149222822957974, 'cgF1_eval_segm_precision@0.75': 0.6640084211254257, 'cgF1_eval_segm_recall@0.75': 0.5798567730119127, 'cgF1_eval_segm_F1@0.75': 0.6190362594405769, 'cgF1_eval_segm_positive_macro_F1@0.75': 0.8153807596867352, 'cgF1_eval_segm_positive_micro_F1@0.75': 0.6383130136529085, 'cgF1_eval_segm_positive_micro_precision@0.75': 0.7099991898479673}\n", + "loading annotations into memory...\n", + "Done (t=0.22s)\n", + "creating index...\n", + "index created!\n", + "loading annotations into memory...\n", + "Done (t=0.23s)\n", + "creating index...\n", + "index created!\n", + "loading annotations into memory...\n", + "Done (t=0.21s)\n", + "creating index...\n", + "index created!\n", + "Loaded 26221 predictions\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 33057/33057 [00:08<00:00, 3762.56it/s]\n" + ] + }, + { + "name": "stdout", + 
"output_type": "stream", + "text": [ + "Accumulating results\n", + "cgF1 metric, IoU type=bbox\n", + " Average cgF1 @[ IoU=0.50:0.95] = 0.500\n", + " Average precision @[ IoU=0.50:0.95] = 0.645\n", + " Average recall @[ IoU=0.50:0.95] = 0.563\n", + " Average F1 @[ IoU=0.50:0.95] = 0.601\n", + " Average positive_macro_F1 @[ IoU=0.50:0.95] = 0.813\n", + " Average positive_micro_F1 @[ IoU=0.50:0.95] = 0.620\n", + " Average positive_micro_precision @[ IoU=0.50:0.95] = 0.690\n", + " Average IL_precision = 0.916\n", + " Average IL_recall = 0.760\n", + " Average IL_F1 = 0.831\n", + " Average IL_FPR = 0.013\n", + " Average IL_MCC = 0.807\n", + " Average cgF1 @[ IoU=0.50 ] = 0.571\n", + " Average precision @[ IoU=0.50 ] = 0.736\n", + " Average recall @[ IoU=0.50 ] = 0.642\n", + " Average F1 @[ IoU=0.50 ] = 0.686\n", + " Average positive_macro_F1 @[ IoU=0.50 ] = 0.878\n", + " Average positive_micro_F1 @[ IoU=0.50 ] = 0.707\n", + " Average positive_micro_precision @[ IoU=0.50 ] = 0.787\n", + " Average cgF1 @[ IoU=0.75 ] = 0.530\n", + " Average precision @[ IoU=0.75 ] = 0.683\n", + " Average recall @[ IoU=0.75 ] = 0.596\n", + " Average F1 @[ IoU=0.75 ] = 0.636\n", + " Average positive_macro_F1 @[ IoU=0.75 ] = 0.842\n", + " Average positive_micro_F1 @[ IoU=0.75 ] = 0.656\n", + " Average positive_micro_precision @[ IoU=0.75 ] = 0.731\n", + "\n", + "{'cgF1_eval_bbox_cgF1': 0.5003430734030954, 'cgF1_eval_bbox_precision': 0.6454559084753544, 'cgF1_eval_bbox_recall': 0.5625467145593948, 'cgF1_eval_bbox_F1': 0.6011063859782098, 'cgF1_eval_bbox_positive_macro_F1': 0.8129272614186818, 'cgF1_eval_bbox_positive_micro_F1': 0.6198941576942177, 'cgF1_eval_bbox_positive_micro_precision': 0.6903838360796344, 'cgF1_eval_bbox_IL_precision': 0.916437098044895, 'cgF1_eval_bbox_IL_recall': 0.7598020554320895, 'cgF1_eval_bbox_IL_F1': 0.830800752892585, 'cgF1_eval_bbox_IL_FPR': 0.013092112361504437, 'cgF1_eval_bbox_IL_MCC': 0.8071427471815361, 'cgF1_eval_bbox_cgF1@0.5': 0.5705998939047701, 
'cgF1_eval_bbox_precision@0.5': 0.7360818482088336, 'cgF1_eval_bbox_recall@0.5': 0.6415316986326687, 'cgF1_eval_bbox_F1@0.5': 0.6855123686584659, 'cgF1_eval_bbox_positive_macro_F1@0.5': 0.8775997789207387, 'cgF1_eval_bbox_positive_micro_F1@0.5': 0.7069380179618158, 'cgF1_eval_bbox_positive_micro_precision@0.5': 0.7873179304150807, 'cgF1_eval_bbox_cgF1@0.75': 0.5297149268767383, 'cgF1_eval_bbox_precision@0.75': 0.6833433592887199, 'cgF1_eval_bbox_recall@0.75': 0.59556749986514, 'cgF1_eval_bbox_F1@0.75': 0.6363934960024782, 'cgF1_eval_bbox_positive_macro_F1@0.75': 0.8423507795177905, 'cgF1_eval_bbox_positive_micro_F1@0.75': 0.6562840696103028, 'cgF1_eval_bbox_positive_micro_precision@0.75': 0.7309084997915145}\n", + "Processing subset: sa1b_nps\n", + "loading annotations into memory...\n", + "Done (t=0.42s)\n", + "creating index...\n", + "index created!\n", + "loading annotations into memory...\n", + "Done (t=0.42s)\n", + "creating index...\n", + "index created!\n", + "loading annotations into memory...\n", + "Done (t=0.44s)\n", + "creating index...\n", + "index created!\n", + "Loaded 50994 predictions\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 12893/12893 [00:12<00:00, 1019.95it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Accumulating results\n", + "cgF1 metric, IoU type=segm\n", + " Average cgF1 @[ IoU=0.50:0.95] = 0.537\n", + " Average precision @[ IoU=0.50:0.95] = 0.613\n", + " Average recall @[ IoU=0.50:0.95] = 0.624\n", + " Average F1 @[ IoU=0.50:0.95] = 0.618\n", + " Average positive_macro_F1 @[ IoU=0.50:0.95] = 0.749\n", + " Average positive_micro_F1 @[ 
IoU=0.50:0.95] = 0.626\n", + " Average positive_micro_precision @[ IoU=0.50:0.95] = 0.627\n", + " Average IL_precision = 0.957\n", + " Average IL_recall = 0.918\n", + " Average IL_F1 = 0.937\n", + " Average IL_FPR = 0.055\n", + " Average IL_MCC = 0.858\n", + " Average cgF1 @[ IoU=0.50 ] = 0.662\n", + " Average precision @[ IoU=0.50 ] = 0.755\n", + " Average recall @[ IoU=0.50 ] = 0.769\n", + " Average F1 @[ IoU=0.50 ] = 0.762\n", + " Average positive_macro_F1 @[ IoU=0.50 ] = 0.868\n", + " Average positive_micro_F1 @[ IoU=0.50 ] = 0.771\n", + " Average positive_micro_precision @[ IoU=0.50 ] = 0.773\n", + " Average cgF1 @[ IoU=0.75 ] = 0.584\n", + " Average precision @[ IoU=0.75 ] = 0.666\n", + " Average recall @[ IoU=0.75 ] = 0.679\n", + " Average F1 @[ IoU=0.75 ] = 0.672\n", + " Average positive_macro_F1 @[ IoU=0.75 ] = 0.803\n", + " Average positive_micro_F1 @[ IoU=0.75 ] = 0.680\n", + " Average positive_micro_precision @[ IoU=0.75 ] = 0.682\n", + "\n", + "{'cgF1_eval_segm_cgF1': 0.5368885175297502, 'cgF1_eval_segm_precision': 0.6126733177941744, 'cgF1_eval_segm_recall': 0.623866410830199, 'cgF1_eval_segm_F1': 0.6181692134008732, 'cgF1_eval_segm_positive_macro_F1': 0.7490377907736454, 'cgF1_eval_segm_positive_micro_F1': 0.6255271013781429, 'cgF1_eval_segm_positive_micro_precision': 0.6272971894411186, 'cgF1_eval_segm_IL_precision': 0.9571307299155163, 'cgF1_eval_segm_IL_recall': 0.9180350114102273, 'cgF1_eval_segm_IL_F1': 0.9371748135185498, 'cgF1_eval_segm_IL_FPR': 0.054851556832937805, 'cgF1_eval_segm_IL_MCC': 0.8582977721459122, 'cgF1_eval_segm_cgF1@0.5': 0.6617782611463625, 'cgF1_eval_segm_precision@0.5': 0.7551805547951372, 'cgF1_eval_segm_recall@0.5': 0.7689771507351261, 'cgF1_eval_segm_F1@0.5': 0.7619664171091266, 'cgF1_eval_segm_positive_macro_F1@0.5': 0.8675783572433642, 'cgF1_eval_segm_positive_micro_F1@0.5': 0.7710357437976187, 'cgF1_eval_segm_positive_micro_precision@0.5': 0.7732059252215057, 'cgF1_eval_segm_cgF1@0.75': 0.584028878830561, 
'cgF1_eval_segm_precision@0.75': 0.6664635050035673, 'cgF1_eval_segm_recall@0.75': 0.6786393053852088, 'cgF1_eval_segm_F1@0.75': 0.6724463056533416, 'cgF1_eval_segm_positive_macro_F1@0.75': 0.8029021282211246, 'cgF1_eval_segm_positive_micro_F1@0.75': 0.6804501861520328, 'cgF1_eval_segm_positive_micro_precision@0.75': 0.6823712921904396}\n", + "loading annotations into memory...\n", + "Done (t=0.35s)\n", + "creating index...\n", + "index created!\n", + "loading annotations into memory...\n", + "Done (t=0.35s)\n", + "creating index...\n", + "index created!\n", + "loading annotations into memory...\n", + "Done (t=0.38s)\n", + "creating index...\n", + "index created!\n", + "Loaded 50994 predictions\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 12893/12893 [00:07<00:00, 1636.66it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Accumulating results\n", + "cgF1 metric, IoU type=bbox\n", + " Average cgF1 @[ IoU=0.50:0.95] = 0.554\n", + " Average precision @[ IoU=0.50:0.95] = 0.633\n", + " Average recall @[ IoU=0.50:0.95] = 0.642\n", + " Average F1 @[ IoU=0.50:0.95] = 0.637\n", + " Average positive_macro_F1 @[ IoU=0.50:0.95] = 0.786\n", + " Average positive_micro_F1 @[ IoU=0.50:0.95] = 0.645\n", + " Average positive_micro_precision @[ IoU=0.50:0.95] = 0.648\n", + " Average IL_precision = 0.957\n", + " Average IL_recall = 0.918\n", + " Average IL_F1 = 0.937\n", + " Average IL_FPR = 0.055\n", + " Average IL_MCC = 0.858\n", + " Average cgF1 @[ IoU=0.50 ] = 0.656\n", + " Average precision @[ IoU=0.50 ] = 0.749\n", + " Average recall @[ IoU=0.50 ] = 0.760\n", + " Average F1 @[ IoU=0.50 ] 
= 0.755\n", + " Average positive_macro_F1 @[ IoU=0.50 ] = 0.863\n", + " Average positive_micro_F1 @[ IoU=0.50 ] = 0.764\n", + " Average positive_micro_precision @[ IoU=0.50 ] = 0.768\n", + " Average cgF1 @[ IoU=0.75 ] = 0.589\n", + " Average precision @[ IoU=0.75 ] = 0.673\n", + " Average recall @[ IoU=0.75 ] = 0.683\n", + " Average F1 @[ IoU=0.75 ] = 0.678\n", + " Average positive_macro_F1 @[ IoU=0.75 ] = 0.817\n", + " Average positive_micro_F1 @[ IoU=0.75 ] = 0.686\n", + " Average positive_micro_precision @[ IoU=0.75 ] = 0.690\n", + "\n", + "{'cgF1_eval_bbox_cgF1': 0.5536231468412395, 'cgF1_eval_bbox_precision': 0.6328165449001351, 'cgF1_eval_bbox_recall': 0.6417390943649743, 'cgF1_eval_bbox_F1': 0.6371965950434251, 'cgF1_eval_bbox_positive_macro_F1': 0.785505899426481, 'cgF1_eval_bbox_positive_micro_F1': 0.6449030271971777, 'cgF1_eval_bbox_positive_micro_precision': 0.6481993029451039, 'cgF1_eval_bbox_IL_precision': 0.957272212654602, 'cgF1_eval_bbox_IL_recall': 0.9180461328469408, 'cgF1_eval_bbox_IL_F1': 0.9372484265333024, 'cgF1_eval_bbox_IL_FPR': 0.05468042729410095, 'cgF1_eval_bbox_IL_MCC': 0.8584595256861315, 'cgF1_eval_bbox_cgF1@0.5': 0.655649783867308, 'cgF1_eval_bbox_precision@0.5': 0.7494286121079209, 'cgF1_eval_bbox_recall@0.5': 0.7599953615328335, 'cgF1_eval_bbox_F1@0.5': 0.7546250062240573, 'cgF1_eval_bbox_positive_macro_F1@0.5': 0.8630646236311263, 'cgF1_eval_bbox_positive_micro_F1@0.5': 0.7637515389479476, 'cgF1_eval_bbox_positive_micro_precision@0.5': 0.7676460229909632, 'cgF1_eval_bbox_cgF1@0.75': 0.5891939922914327, 'cgF1_eval_bbox_precision@0.75': 0.6734724947681224, 'cgF1_eval_bbox_recall@0.75': 0.6829682826014282, 'cgF1_eval_bbox_F1@0.75': 0.6781371571048335, 'cgF1_eval_bbox_positive_macro_F1@0.75': 0.8172625456348024, 'cgF1_eval_bbox_positive_micro_F1@0.75': 0.6863386970055625, 'cgF1_eval_bbox_positive_micro_precision@0.75': 0.6898435339270219}\n", + "Processing subset: crowded\n", + "loading annotations into memory...\n", + "Done 
(t=0.42s)\n", + "creating index...\n", + "index created!\n", + "loading annotations into memory...\n", + "Done (t=0.42s)\n", + "creating index...\n", + "index created!\n", + "loading annotations into memory...\n", + "Done (t=0.42s)\n", + "creating index...\n", + "index created!\n", + "Loaded 82963 predictions\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 20241/20241 [00:10<00:00, 2003.85it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Accumulating results\n", + "cgF1 metric, IoU type=segm\n", + " Average cgF1 @[ IoU=0.50:0.95] = 0.611\n", + " Average precision @[ IoU=0.50:0.95] = 0.643\n", + " Average recall @[ IoU=0.50:0.95] = 0.686\n", + " Average F1 @[ IoU=0.50:0.95] = 0.664\n", + " Average positive_macro_F1 @[ IoU=0.50:0.95] = 0.689\n", + " Average positive_micro_F1 @[ IoU=0.50:0.95] = 0.677\n", + " Average positive_micro_precision @[ IoU=0.50:0.95] = 0.669\n", + " Average IL_precision = 0.933\n", + " Average IL_recall = 0.913\n", + " Average IL_F1 = 0.923\n", + " Average IL_FPR = 0.018\n", + " Average IL_MCC = 0.902\n", + " Average cgF1 @[ IoU=0.50 ] = 0.735\n", + " Average precision @[ IoU=0.50 ] = 0.773\n", + " Average recall @[ IoU=0.50 ] = 0.825\n", + " Average F1 @[ IoU=0.50 ] = 0.798\n", + " Average positive_macro_F1 @[ IoU=0.50 ] = 0.830\n", + " Average positive_micro_F1 @[ IoU=0.50 ] = 0.815\n", + " Average positive_micro_precision @[ IoU=0.50 ] = 0.805\n", + " Average cgF1 @[ IoU=0.75 ] = 0.667\n", + " Average precision @[ IoU=0.75 ] = 0.702\n", + " Average recall @[ IoU=0.75 ] = 0.748\n", + " Average F1 @[ IoU=0.75 ] = 0.724\n", + " Average 
positive_macro_F1 @[ IoU=0.75 ] = 0.748\n", + " Average positive_micro_F1 @[ IoU=0.75 ] = 0.739\n", + " Average positive_micro_precision @[ IoU=0.75 ] = 0.730\n", + "\n", + "{'cgF1_eval_segm_cgF1': 0.6107822436866207, 'cgF1_eval_segm_precision': 0.6429349935778358, 'cgF1_eval_segm_recall': 0.6856112974213837, 'cgF1_eval_segm_F1': 0.663537765323902, 'cgF1_eval_segm_positive_macro_F1': 0.6891461338399572, 'cgF1_eval_segm_positive_micro_F1': 0.6772901260675144, 'cgF1_eval_segm_positive_micro_precision': 0.6692661193229303, 'cgF1_eval_segm_IL_precision': 0.9330264669889466, 'cgF1_eval_segm_IL_recall': 0.9132687540181867, 'cgF1_eval_segm_IL_F1': 0.9230413942007347, 'cgF1_eval_segm_IL_FPR': 0.018415390455738805, 'cgF1_eval_segm_IL_MCC': 0.901802964754481, 'cgF1_eval_segm_cgF1@0.5': 0.7346796584523991, 'cgF1_eval_segm_precision@0.5': 0.77334498201075, 'cgF1_eval_segm_recall@0.5': 0.8246775518006043, 'cgF1_eval_segm_F1@0.5': 0.7981368549267828, 'cgF1_eval_segm_positive_macro_F1@0.5': 0.829596777967627, 'cgF1_eval_segm_positive_micro_F1@0.5': 0.8146786905412517, 'cgF1_eval_segm_positive_micro_precision@0.5': 0.8050169926635622, 'cgF1_eval_segm_cgF1@0.75': 0.6666293625039935, 'cgF1_eval_segm_precision@0.75': 0.7017176741205416, 'cgF1_eval_segm_recall@0.75': 0.7482958149470458, 'cgF1_eval_segm_F1@0.75': 0.7242086950813854, 'cgF1_eval_segm_positive_macro_F1@0.75': 0.748186632903701, 'cgF1_eval_segm_positive_micro_F1@0.75': 0.7392184197193072, 'cgF1_eval_segm_positive_micro_precision@0.75': 0.7304562192291248}\n", + "loading annotations into memory...\n", + "Done (t=0.36s)\n", + "creating index...\n", + "index created!\n", + "loading annotations into memory...\n", + "Done (t=0.37s)\n", + "creating index...\n", + "index created!\n", + "loading annotations into memory...\n", + "Done (t=0.37s)\n", + "creating index...\n", + "index created!\n", + "Loaded 82963 predictions\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + 
"100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 20241/20241 [00:07<00:00, 2785.45it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Accumulating results\n", + "cgF1 metric, IoU type=bbox\n", + " Average cgF1 @[ IoU=0.50:0.95] = 0.617\n", + " Average precision @[ IoU=0.50:0.95] = 0.650\n", + " Average recall @[ IoU=0.50:0.95] = 0.692\n", + " Average F1 @[ IoU=0.50:0.95] = 0.670\n", + " Average positive_macro_F1 @[ IoU=0.50:0.95] = 0.731\n", + " Average positive_micro_F1 @[ IoU=0.50:0.95] = 0.684\n", + " Average positive_micro_precision @[ IoU=0.50:0.95] = 0.677\n", + " Average IL_precision = 0.933\n", + " Average IL_recall = 0.913\n", + " Average IL_F1 = 0.923\n", + " Average IL_FPR = 0.018\n", + " Average IL_MCC = 0.902\n", + " Average cgF1 @[ IoU=0.50 ] = 0.730\n", + " Average precision @[ IoU=0.50 ] = 0.769\n", + " Average recall @[ IoU=0.50 ] = 0.818\n", + " Average F1 @[ IoU=0.50 ] = 0.793\n", + " Average positive_macro_F1 @[ IoU=0.50 ] = 0.829\n", + " Average positive_micro_F1 @[ IoU=0.50 ] = 0.809\n", + " Average positive_micro_precision @[ IoU=0.50 ] = 0.801\n", + " Average cgF1 @[ IoU=0.75 ] = 0.668\n", + " Average precision @[ IoU=0.75 ] = 0.704\n", + " Average recall @[ IoU=0.75 ] = 0.749\n", + " Average F1 @[ IoU=0.75 ] = 0.726\n", + " Average positive_macro_F1 @[ IoU=0.75 ] = 0.776\n", + " Average positive_micro_F1 @[ IoU=0.75 ] = 0.741\n", + " Average positive_micro_precision @[ IoU=0.75 ] = 0.733\n", + "\n", + "{'cgF1_eval_bbox_cgF1': 0.6170650320984323, 'cgF1_eval_bbox_precision': 0.6504449386041414, 'cgF1_eval_bbox_recall': 0.691669313906474, 'cgF1_eval_bbox_F1': 0.6703740536459668, 'cgF1_eval_bbox_positive_macro_F1': 
0.7305480221708001, 'cgF1_eval_bbox_positive_micro_F1': 0.6842570452919619, 'cgF1_eval_bbox_positive_micro_precision': 0.677099838003926, 'cgF1_eval_bbox_IL_precision': 0.9330264669889466, 'cgF1_eval_bbox_IL_recall': 0.9132687540181867, 'cgF1_eval_bbox_IL_F1': 0.9230413942007347, 'cgF1_eval_bbox_IL_FPR': 0.018415390455738805, 'cgF1_eval_bbox_IL_MCC': 0.901802964754481, 'cgF1_eval_bbox_cgF1@0.5': 0.7298487590689315, 'cgF1_eval_bbox_precision@0.5': 0.7693209759495023, 'cgF1_eval_bbox_recall@0.5': 0.818079563737977, 'cgF1_eval_bbox_F1@0.5': 0.7929014858831388, 'cgF1_eval_bbox_positive_macro_F1@0.5': 0.8289958931744248, 'cgF1_eval_bbox_positive_micro_F1@0.5': 0.8093217560752147, 'cgF1_eval_bbox_positive_micro_precision@0.5': 0.8008473542838228, 'cgF1_eval_bbox_cgF1@0.75': 0.6681534554056624, 'cgF1_eval_bbox_precision@0.75': 0.7042930379997401, 'cgF1_eval_bbox_recall@0.75': 0.7489302375506022, 'cgF1_eval_bbox_F1@0.75': 0.7258761503763029, 'cgF1_eval_bbox_positive_macro_F1@0.75': 0.7757641647406367, 'cgF1_eval_bbox_positive_micro_F1@0.75': 0.7409084706076228, 'cgF1_eval_bbox_positive_micro_precision@0.75': 0.7331546048467947}\n", + "Processing subset: fg_food\n", + "loading annotations into memory...\n", + "Done (t=0.12s)\n", + "creating index...\n", + "index created!\n", + "loading annotations into memory...\n", + "Done (t=0.12s)\n", + "creating index...\n", + "index created!\n", + "loading annotations into memory...\n", + "Done (t=0.10s)\n", + "creating index...\n", + "index created!\n", + "Loaded 10846 predictions\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 13794/13794 [00:03<00:00, 3963.81it/s]\n" + ] + }, 
+ { + "name": "stdout", + "output_type": "stream", + "text": [ + "Accumulating results\n", + "cgF1 metric, IoU type=segm\n", + " Average cgF1 @[ IoU=0.50:0.95] = 0.534\n", + " Average precision @[ IoU=0.50:0.95] = 0.734\n", + " Average recall @[ IoU=0.50:0.95] = 0.583\n", + " Average F1 @[ IoU=0.50:0.95] = 0.650\n", + " Average positive_macro_F1 @[ IoU=0.50:0.95] = 0.825\n", + " Average positive_micro_F1 @[ IoU=0.50:0.95] = 0.673\n", + " Average positive_micro_precision @[ IoU=0.50:0.95] = 0.795\n", + " Average IL_precision = 0.917\n", + " Average IL_recall = 0.713\n", + " Average IL_F1 = 0.802\n", + " Average IL_FPR = 0.006\n", + " Average IL_MCC = 0.794\n", + " Average cgF1 @[ IoU=0.50 ] = 0.582\n", + " Average precision @[ IoU=0.50 ] = 0.800\n", + " Average recall @[ IoU=0.50 ] = 0.635\n", + " Average F1 @[ IoU=0.50 ] = 0.708\n", + " Average positive_macro_F1 @[ IoU=0.50 ] = 0.885\n", + " Average positive_micro_F1 @[ IoU=0.50 ] = 0.733\n", + " Average positive_micro_precision @[ IoU=0.50 ] = 0.866\n", + " Average cgF1 @[ IoU=0.75 ] = 0.561\n", + " Average precision @[ IoU=0.75 ] = 0.771\n", + " Average recall @[ IoU=0.75 ] = 0.612\n", + " Average F1 @[ IoU=0.75 ] = 0.682\n", + " Average positive_macro_F1 @[ IoU=0.75 ] = 0.854\n", + " Average positive_micro_F1 @[ IoU=0.75 ] = 0.706\n", + " Average positive_micro_precision @[ IoU=0.75 ] = 0.835\n", + "\n", + "{'cgF1_eval_segm_cgF1': 0.5340544897290387, 'cgF1_eval_segm_precision': 0.734205779536348, 'cgF1_eval_segm_recall': 0.5831005937305522, 'cgF1_eval_segm_F1': 0.6499373841120774, 'cgF1_eval_segm_positive_macro_F1': 0.8250979043741161, 'cgF1_eval_segm_positive_micro_F1': 0.6727883081524807, 'cgF1_eval_segm_positive_micro_precision': 0.7952174681839976, 'cgF1_eval_segm_IL_precision': 0.9171210458919072, 'cgF1_eval_segm_IL_recall': 0.7133163691999013, 'cgF1_eval_segm_IL_F1': 0.8024804230776101, 'cgF1_eval_segm_IL_FPR': 0.006024573919459011, 'cgF1_eval_segm_IL_MCC': 0.7937927625341562, 'cgF1_eval_segm_cgF1@0.5': 
0.5816253704026515, 'cgF1_eval_segm_precision@0.5': 0.7996003836243679, 'cgF1_eval_segm_recall@0.5': 0.6350364862736196, 'cgF1_eval_segm_F1@0.5': 0.7078307231011086, 'cgF1_eval_segm_positive_macro_F1@0.5': 0.8854304579823623, 'cgF1_eval_segm_positive_micro_F1@0.5': 0.732716897727604, 'cgF1_eval_segm_positive_micro_precision@0.5': 0.8660462915809068, 'cgF1_eval_segm_cgF1@0.75': 0.5605505836352753, 'cgF1_eval_segm_precision@0.75': 0.7706293552321807, 'cgF1_eval_segm_recall@0.75': 0.6120279179303726, 'cgF1_eval_segm_F1@0.75': 0.6821828949054192, 'cgF1_eval_segm_positive_macro_F1@0.75': 0.8542637117087882, 'cgF1_eval_segm_positive_micro_F1@0.75': 0.7061674155931289, 'cgF1_eval_segm_positive_micro_precision@0.75': 0.8346678027555117}\n", + "loading annotations into memory...\n", + "Done (t=0.09s)\n", + "creating index...\n", + "index created!\n", + "loading annotations into memory...\n", + "Done (t=0.08s)\n", + "creating index...\n", + "index created!\n", + "loading annotations into memory...\n", + "Done (t=0.08s)\n", + "creating index...\n", + "index created!\n", + "Loaded 10846 predictions\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 13794/13794 [00:03<00:00, 4490.07it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Accumulating results\n", + "cgF1 metric, IoU type=bbox\n", + " Average cgF1 @[ IoU=0.50:0.95] = 0.538\n", + " Average precision @[ IoU=0.50:0.95] = 0.737\n", + " Average recall @[ IoU=0.50:0.95] = 0.588\n", + " Average F1 @[ IoU=0.50:0.95] = 0.654\n", + " Average positive_macro_F1 @[ IoU=0.50:0.95] = 0.859\n", + " Average positive_micro_F1 @[ IoU=0.50:0.95] = 
0.676\n", + " Average positive_micro_precision @[ IoU=0.50:0.95] = 0.797\n", + " Average IL_precision = 0.919\n", + " Average IL_recall = 0.714\n", + " Average IL_F1 = 0.804\n", + " Average IL_FPR = 0.006\n", + " Average IL_MCC = 0.795\n", + " Average cgF1 @[ IoU=0.50 ] = 0.583\n", + " Average precision @[ IoU=0.50 ] = 0.798\n", + " Average recall @[ IoU=0.50 ] = 0.637\n", + " Average F1 @[ IoU=0.50 ] = 0.708\n", + " Average positive_macro_F1 @[ IoU=0.50 ] = 0.898\n", + " Average positive_micro_F1 @[ IoU=0.50 ] = 0.733\n", + " Average positive_micro_precision @[ IoU=0.50 ] = 0.864\n", + " Average cgF1 @[ IoU=0.75 ] = 0.560\n", + " Average precision @[ IoU=0.75 ] = 0.766\n", + " Average recall @[ IoU=0.75 ] = 0.612\n", + " Average F1 @[ IoU=0.75 ] = 0.680\n", + " Average positive_macro_F1 @[ IoU=0.75 ] = 0.875\n", + " Average positive_micro_F1 @[ IoU=0.75 ] = 0.704\n", + " Average positive_micro_precision @[ IoU=0.75 ] = 0.829\n", + "\n", + "{'cgF1_eval_bbox_cgF1': 0.5378561873516663, 'cgF1_eval_bbox_precision': 0.7365034817881422, 'cgF1_eval_bbox_recall': 0.5876295139864577, 'cgF1_eval_bbox_F1': 0.6536480883299698, 'cgF1_eval_bbox_positive_macro_F1': 0.8586355938093078, 'cgF1_eval_bbox_positive_micro_F1': 0.6764422338399003, 'cgF1_eval_bbox_positive_micro_precision': 0.7970161989834335, 'cgF1_eval_bbox_IL_precision': 0.9193020709713172, 'cgF1_eval_bbox_IL_recall': 0.7138018622237071, 'cgF1_eval_bbox_IL_F1': 0.8036220047681086, 'cgF1_eval_bbox_IL_FPR': 0.005866962657110365, 'cgF1_eval_bbox_IL_MCC': 0.7951250830369732, 'cgF1_eval_bbox_cgF1@0.5': 0.5827707578455898, 'cgF1_eval_bbox_precision@0.5': 0.7980019820579024, 'cgF1_eval_bbox_recall@0.5': 0.6366969450550463, 'cgF1_eval_bbox_F1@0.5': 0.7082322331345615, 'cgF1_eval_bbox_positive_macro_F1@0.5': 0.8975145206561829, 'cgF1_eval_bbox_positive_micro_F1@0.5': 0.7329296613555469, 'cgF1_eval_bbox_positive_micro_precision@0.5': 0.8635675488958368, 'cgF1_eval_bbox_cgF1@0.75': 0.5597151953142597, 
'cgF1_eval_bbox_precision@0.75': 0.7664335511202087, 'cgF1_eval_bbox_recall@0.75': 0.6115096347599293, 'cgF1_eval_bbox_F1@0.75': 0.6802130814411795, 'cgF1_eval_bbox_positive_macro_F1@0.75': 0.8745268555395833, 'cgF1_eval_bbox_positive_micro_F1@0.75': 0.7039335159399481, 'cgF1_eval_bbox_positive_micro_precision@0.75': 0.829405387472316}\n", + "Processing subset: fg_sports_equipment\n", + "loading annotations into memory...\n", + "Done (t=0.08s)\n", + "creating index...\n", + "index created!\n", + "loading annotations into memory...\n", + "Done (t=0.08s)\n", + "creating index...\n", + "index created!\n", + "loading annotations into memory...\n", + "Done (t=0.08s)\n", + "creating index...\n", + "index created!\n", + "Loaded 6562 predictions\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 12107/12107 [00:03<00:00, 3306.95it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Accumulating results\n", + "cgF1 metric, IoU type=segm\n", + " Average cgF1 @[ IoU=0.50:0.95] = 0.655\n", + " Average precision @[ IoU=0.50:0.95] = 0.733\n", + " Average recall @[ IoU=0.50:0.95] = 0.701\n", + " Average F1 @[ IoU=0.50:0.95] = 0.717\n", + " Average positive_macro_F1 @[ IoU=0.50:0.95] = 0.840\n", + " Average positive_micro_F1 @[ IoU=0.50:0.95] = 0.738\n", + " Average positive_micro_precision @[ IoU=0.50:0.95] = 0.778\n", + " Average IL_precision = 0.962\n", + " Average IL_recall = 0.850\n", + " Average IL_F1 = 0.903\n", + " Average IL_FPR = 0.006\n", + " Average IL_MCC = 0.888\n", + " Average cgF1 @[ IoU=0.50 ] = 0.737\n", + " Average precision @[ IoU=0.50 ] = 0.825\n", + " Average recall @[ 
IoU=0.50 ] = 0.789\n", + " Average F1 @[ IoU=0.50 ] = 0.807\n", + " Average positive_macro_F1 @[ IoU=0.50 ] = 0.920\n", + " Average positive_micro_F1 @[ IoU=0.50 ] = 0.830\n", + " Average positive_micro_precision @[ IoU=0.50 ] = 0.875\n", + " Average cgF1 @[ IoU=0.75 ] = 0.697\n", + " Average precision @[ IoU=0.75 ] = 0.780\n", + " Average recall @[ IoU=0.75 ] = 0.746\n", + " Average F1 @[ IoU=0.75 ] = 0.763\n", + " Average positive_macro_F1 @[ IoU=0.75 ] = 0.879\n", + " Average positive_micro_F1 @[ IoU=0.75 ] = 0.785\n", + " Average positive_micro_precision @[ IoU=0.75 ] = 0.827\n", + "\n", + "{'cgF1_eval_segm_cgF1': 0.6552238992458554, 'cgF1_eval_segm_precision': 0.7333019582948739, 'cgF1_eval_segm_recall': 0.7013045276359755, 'cgF1_eval_segm_F1': 0.7168964364723446, 'cgF1_eval_segm_positive_macro_F1': 0.8401797467726777, 'cgF1_eval_segm_positive_micro_F1': 0.7375018641548449, 'cgF1_eval_segm_positive_micro_precision': 0.7777500429595898, 'cgF1_eval_segm_IL_precision': 0.9624470012341326, 'cgF1_eval_segm_IL_recall': 0.8497326198664531, 'cgF1_eval_segm_IL_F1': 0.9025839944636483, 'cgF1_eval_segm_IL_FPR': 0.0060564618534671814, 'cgF1_eval_segm_IL_MCC': 0.8884369397448539, 'cgF1_eval_segm_cgF1@0.5': 0.7373951194278512, 'cgF1_eval_segm_precision@0.5': 0.8252586823786763, 'cgF1_eval_segm_recall@0.5': 0.7892487451810898, 'cgF1_eval_segm_F1@0.5': 0.8068021593354898, 'cgF1_eval_segm_positive_macro_F1@0.5': 0.920431978172454, 'cgF1_eval_segm_positive_micro_F1@0.5': 0.8299915125541946, 'cgF1_eval_segm_positive_micro_precision@0.5': 0.8752805967752407, 'cgF1_eval_segm_cgF1@0.75': 0.6970450572765126, 'cgF1_eval_segm_precision@0.75': 0.7801034623682158, 'cgF1_eval_segm_recall@0.75': 0.7460638608622613, 'cgF1_eval_segm_F1@0.75': 0.7626540805113754, 'cgF1_eval_segm_positive_macro_F1@0.75': 0.8790410692537068, 'cgF1_eval_segm_positive_micro_F1@0.75': 0.7845746007327135, 'cgF1_eval_segm_positive_micro_precision@0.75': 0.8273883555153815}\n", + "loading annotations into 
memory...\n", + "Done (t=0.06s)\n", + "creating index...\n", + "index created!\n", + "loading annotations into memory...\n", + "Done (t=0.06s)\n", + "creating index...\n", + "index created!\n", + "loading annotations into memory...\n", + "Done (t=0.06s)\n", + "creating index...\n", + "index created!\n", + "Loaded 6562 predictions\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 12107/12107 [00:03<00:00, 3661.16it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Accumulating results\n", + "cgF1 metric, IoU type=bbox\n", + " Average cgF1 @[ IoU=0.50:0.95] = 0.681\n", + " Average precision @[ IoU=0.50:0.95] = 0.760\n", + " Average recall @[ IoU=0.50:0.95] = 0.730\n", + " Average F1 @[ IoU=0.50:0.95] = 0.745\n", + " Average positive_macro_F1 @[ IoU=0.50:0.95] = 0.879\n", + " Average positive_micro_F1 @[ IoU=0.50:0.95] = 0.766\n", + " Average positive_micro_precision @[ IoU=0.50:0.95] = 0.806\n", + " Average IL_precision = 0.962\n", + " Average IL_recall = 0.850\n", + " Average IL_F1 = 0.903\n", + " Average IL_FPR = 0.006\n", + " Average IL_MCC = 0.888\n", + " Average cgF1 @[ IoU=0.50 ] = 0.735\n", + " Average precision @[ IoU=0.50 ] = 0.821\n", + " Average recall @[ IoU=0.50 ] = 0.788\n", + " Average F1 @[ IoU=0.50 ] = 0.804\n", + " Average positive_macro_F1 @[ IoU=0.50 ] = 0.921\n", + " Average positive_micro_F1 @[ IoU=0.50 ] = 0.827\n", + " Average positive_micro_precision @[ IoU=0.50 ] = 0.870\n", + " Average cgF1 @[ IoU=0.75 ] = 0.711\n", + " Average precision @[ IoU=0.75 ] = 0.794\n", + " Average recall @[ IoU=0.75 ] = 0.762\n", + " Average F1 @[ IoU=0.75 ] = 0.777\n", + " 
Average positive_macro_F1 @[ IoU=0.75 ] = 0.901\n", + " Average positive_micro_F1 @[ IoU=0.75 ] = 0.800\n", + " Average positive_micro_precision @[ IoU=0.75 ] = 0.842\n", + "\n", + "{'cgF1_eval_bbox_cgF1': 0.6806904643208772, 'cgF1_eval_bbox_precision': 0.7600893518323295, 'cgF1_eval_bbox_recall': 0.7300429019642443, 'cgF1_eval_bbox_F1': 0.744713228137678, 'cgF1_eval_bbox_positive_macro_F1': 0.8793127218535274, 'cgF1_eval_bbox_positive_micro_F1': 0.766166324102149, 'cgF1_eval_bbox_positive_micro_precision': 0.806161117331975, 'cgF1_eval_bbox_IL_precision': 0.9624470012341326, 'cgF1_eval_bbox_IL_recall': 0.8497326198664531, 'cgF1_eval_bbox_IL_F1': 0.9025839944636483, 'cgF1_eval_bbox_IL_FPR': 0.0060564618534671814, 'cgF1_eval_bbox_IL_MCC': 0.8884369397448539, 'cgF1_eval_bbox_cgF1@0.5': 0.7348434113860801, 'cgF1_eval_bbox_precision@0.5': 0.8205550136275866, 'cgF1_eval_bbox_recall@0.5': 0.7881183467784425, 'cgF1_eval_bbox_F1@0.5': 0.8039596834419491, 'cgF1_eval_bbox_positive_macro_F1@0.5': 0.9208045886576268, 'cgF1_eval_bbox_positive_micro_F1@0.5': 0.8271193806924737, 'cgF1_eval_bbox_positive_micro_precision@0.5': 0.8702918216440054, 'cgF1_eval_bbox_cgF1@0.75': 0.7106209729640045, 'cgF1_eval_bbox_precision@0.75': 0.7935089183088213, 'cgF1_eval_bbox_recall@0.75': 0.7621413877989296, 'cgF1_eval_bbox_F1@0.75': 0.7774589352243091, 'cgF1_eval_bbox_positive_macro_F1@0.75': 0.9005662678327496, 'cgF1_eval_bbox_positive_micro_F1@0.75': 0.799855275229871, 'cgF1_eval_bbox_positive_micro_precision@0.75': 0.8416063646394022}\n", + "Processing subset: attributes\n", + "loading annotations into memory...\n", + "Done (t=0.07s)\n", + "creating index...\n", + "index created!\n", + "loading annotations into memory...\n", + "Done (t=0.06s)\n", + "creating index...\n", + "index created!\n", + "loading annotations into memory...\n", + "Done (t=0.06s)\n", + "creating index...\n", + "index created!\n", + "Loaded 5834 predictions\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + 
"text": [ + "100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 9222/9222 [00:03<00:00, 2820.36it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Accumulating results\n", + "cgF1 metric, IoU type=segm\n", + " Average cgF1 @[ IoU=0.50:0.95] = 0.549\n", + " Average precision @[ IoU=0.50:0.95] = 0.643\n", + " Average recall @[ IoU=0.50:0.95] = 0.670\n", + " Average F1 @[ IoU=0.50:0.95] = 0.656\n", + " Average positive_macro_F1 @[ IoU=0.50:0.95] = 0.872\n", + " Average positive_micro_F1 @[ IoU=0.50:0.95] = 0.720\n", + " Average positive_micro_precision @[ IoU=0.50:0.95] = 0.778\n", + " Average IL_precision = 0.797\n", + " Average IL_recall = 0.819\n", + " Average IL_F1 = 0.808\n", + " Average IL_FPR = 0.048\n", + " Average IL_MCC = 0.763\n", + " Average cgF1 @[ IoU=0.50 ] = 0.600\n", + " Average precision @[ IoU=0.50 ] = 0.703\n", + " Average recall @[ IoU=0.50 ] = 0.733\n", + " Average F1 @[ IoU=0.50 ] = 0.717\n", + " Average positive_macro_F1 @[ IoU=0.50 ] = 0.930\n", + " Average positive_micro_F1 @[ IoU=0.50 ] = 0.787\n", + " Average positive_micro_precision @[ IoU=0.50 ] = 0.850\n", + " Average cgF1 @[ IoU=0.75 ] = 0.569\n", + " Average precision @[ IoU=0.75 ] = 0.667\n", + " Average recall @[ IoU=0.75 ] = 0.695\n", + " Average F1 @[ IoU=0.75 ] = 0.680\n", + " Average positive_macro_F1 @[ IoU=0.75 ] = 0.890\n", + " Average positive_micro_F1 @[ IoU=0.75 ] = 0.746\n", + " Average positive_micro_precision @[ IoU=0.75 ] = 0.806\n", + "\n", + "{'cgF1_eval_segm_cgF1': 0.5492693860624807, 'cgF1_eval_segm_precision': 0.6432095082730505, 'cgF1_eval_segm_recall': 0.670390051111405, 'cgF1_eval_segm_F1': 0.6564685998234858, 'cgF1_eval_segm_positive_macro_F1': 
0.8717482377036931, 'cgF1_eval_segm_positive_micro_F1': 0.7200264336735771, 'cgF1_eval_segm_positive_micro_precision': 0.7777168057004865, 'cgF1_eval_segm_IL_precision': 0.7970687706893637, 'cgF1_eval_segm_IL_recall': 0.8187608565033231, 'cgF1_eval_segm_IL_F1': 0.807768708426457, 'cgF1_eval_segm_IL_FPR': 0.0480320213411565, 'cgF1_eval_segm_IL_MCC': 0.7628461406064031, 'cgF1_eval_segm_cgF1@0.5': 0.6002145763651893, 'cgF1_eval_segm_precision@0.5': 0.7028636035479556, 'cgF1_eval_segm_recall@0.5': 0.7325649901724294, 'cgF1_eval_segm_F1@0.5': 0.7173570363948575, 'cgF1_eval_segm_positive_macro_F1@0.5': 0.9296877722553353, 'cgF1_eval_segm_positive_micro_F1@0.5': 0.786809481513619, 'cgF1_eval_segm_positive_micro_precision@0.5': 0.849845702782115, 'cgF1_eval_segm_cgF1@0.75': 0.5692213046663587, 'cgF1_eval_segm_precision@0.75': 0.6665721387419297, 'cgF1_eval_segm_recall@0.75': 0.6947399321885362, 'cgF1_eval_segm_F1@0.75': 0.6803146414763769, 'cgF1_eval_segm_positive_macro_F1@0.75': 0.8904105590365932, 'cgF1_eval_segm_positive_micro_F1@0.75': 0.7461810113031079, 'cgF1_eval_segm_positive_micro_precision@0.75': 0.8059650049377783}\n", + "loading annotations into memory...\n", + "Done (t=0.05s)\n", + "creating index...\n", + "index created!\n", + "loading annotations into memory...\n", + "Done (t=0.05s)\n", + "creating index...\n", + "index created!\n", + "loading annotations into memory...\n", + "Done (t=0.06s)\n", + "creating index...\n", + "index created!\n", + "Loaded 5834 predictions\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 9222/9222 [00:02<00:00, 3370.79it/s]\n" + ] + }, + { + "name": "stdout", + 
"output_type": "stream", + "text": [ + "Accumulating results\n", + "cgF1 metric, IoU type=bbox\n", + " Average cgF1 @[ IoU=0.50:0.95] = 0.565\n", + " Average precision @[ IoU=0.50:0.95] = 0.660\n", + " Average recall @[ IoU=0.50:0.95] = 0.689\n", + " Average F1 @[ IoU=0.50:0.95] = 0.674\n", + " Average positive_macro_F1 @[ IoU=0.50:0.95] = 0.901\n", + " Average positive_micro_F1 @[ IoU=0.50:0.95] = 0.739\n", + " Average positive_micro_precision @[ IoU=0.50:0.95] = 0.798\n", + " Average IL_precision = 0.798\n", + " Average IL_recall = 0.819\n", + " Average IL_F1 = 0.808\n", + " Average IL_FPR = 0.048\n", + " Average IL_MCC = 0.764\n", + " Average cgF1 @[ IoU=0.50 ] = 0.602\n", + " Average precision @[ IoU=0.50 ] = 0.703\n", + " Average recall @[ IoU=0.50 ] = 0.734\n", + " Average F1 @[ IoU=0.50 ] = 0.718\n", + " Average positive_macro_F1 @[ IoU=0.50 ] = 0.934\n", + " Average positive_micro_F1 @[ IoU=0.50 ] = 0.788\n", + " Average positive_micro_precision @[ IoU=0.50 ] = 0.850\n", + " Average cgF1 @[ IoU=0.75 ] = 0.580\n", + " Average precision @[ IoU=0.75 ] = 0.679\n", + " Average recall @[ IoU=0.75 ] = 0.708\n", + " Average F1 @[ IoU=0.75 ] = 0.693\n", + " Average positive_macro_F1 @[ IoU=0.75 ] = 0.911\n", + " Average positive_micro_F1 @[ IoU=0.75 ] = 0.760\n", + " Average positive_micro_precision @[ IoU=0.75 ] = 0.820\n", + "\n", + "{'cgF1_eval_bbox_cgF1': 0.5646399041045982, 'cgF1_eval_bbox_precision': 0.6603061905215142, 'cgF1_eval_bbox_recall': 0.6892275617275063, 'cgF1_eval_bbox_F1': 0.6744070010794116, 'cgF1_eval_bbox_positive_macro_F1': 0.9009593951299972, 'cgF1_eval_bbox_positive_micro_F1': 0.7394009518446127, 'cgF1_eval_bbox_positive_micro_precision': 0.7975684658366964, 'cgF1_eval_bbox_IL_precision': 0.7981961664046244, 'cgF1_eval_bbox_IL_recall': 0.8189705027073623, 'cgF1_eval_bbox_IL_F1': 0.8084493997068858, 'cgF1_eval_bbox_IL_FPR': 0.04777792605795036, 'cgF1_eval_bbox_IL_MCC': 0.7636450868719721, 'cgF1_eval_bbox_cgF1@0.5': 0.6015189050487545, 
'cgF1_eval_bbox_precision@0.5': 0.7034306576855498, 'cgF1_eval_bbox_recall@0.5': 0.7342408779449281, 'cgF1_eval_bbox_F1@0.5': 0.7184556528947154, 'cgF1_eval_bbox_positive_macro_F1@0.5': 0.9340793389274347, 'cgF1_eval_bbox_positive_micro_F1@0.5': 0.7876943299834277, 'cgF1_eval_bbox_positive_micro_precision@0.5': 0.8496575051487155, 'cgF1_eval_bbox_cgF1@0.75': 0.5804244074542156, 'cgF1_eval_bbox_precision@0.75': 0.678763802700204, 'cgF1_eval_bbox_recall@0.75': 0.7084936162032075, 'cgF1_eval_bbox_F1@0.75': 0.693260171561296, 'cgF1_eval_bbox_positive_macro_F1@0.75': 0.9107485801358162, 'cgF1_eval_bbox_positive_micro_F1@0.75': 0.7600708986837572, 'cgF1_eval_bbox_positive_micro_precision@0.75': 0.8198629856211306}\n", + "Processing subset: wiki_common\n", + "loading annotations into memory...\n", + "Done (t=0.23s)\n", + "creating index...\n", + "index created!\n", + "loading annotations into memory...\n", + "Done (t=0.21s)\n", + "creating index...\n", + "index created!\n", + "loading annotations into memory...\n", + "Done (t=0.21s)\n", + "creating index...\n", + "index created!\n", + "Loaded 8045 predictions\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 65452/65452 [00:11<00:00, 5775.55it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Accumulating results\n", + "cgF1 metric, IoU type=segm\n", + " Average cgF1 @[ IoU=0.50:0.95] = 0.425\n", + " Average precision @[ IoU=0.50:0.95] = 0.677\n", + " Average recall @[ IoU=0.50:0.95] = 0.509\n", + " Average F1 @[ IoU=0.50:0.95] = 0.581\n", + " Average positive_macro_F1 @[ IoU=0.50:0.95] = 0.811\n", + " Average positive_micro_F1 @[ 
IoU=0.50:0.95] = 0.608\n", + " Average positive_micro_precision @[ IoU=0.50:0.95] = 0.757\n", + " Average IL_precision = 0.822\n", + " Average IL_recall = 0.607\n", + " Average IL_F1 = 0.698\n", + " Average IL_FPR = 0.004\n", + " Average IL_MCC = 0.699\n", + " Average cgF1 @[ IoU=0.50 ] = 0.482\n", + " Average precision @[ IoU=0.50 ] = 0.767\n", + " Average recall @[ IoU=0.50 ] = 0.577\n", + " Average F1 @[ IoU=0.50 ] = 0.658\n", + " Average positive_macro_F1 @[ IoU=0.50 ] = 0.905\n", + " Average positive_micro_F1 @[ IoU=0.50 ] = 0.689\n", + " Average positive_micro_precision @[ IoU=0.50 ] = 0.857\n", + " Average cgF1 @[ IoU=0.75 ] = 0.450\n", + " Average precision @[ IoU=0.75 ] = 0.717\n", + " Average recall @[ IoU=0.75 ] = 0.539\n", + " Average F1 @[ IoU=0.75 ] = 0.615\n", + " Average positive_macro_F1 @[ IoU=0.75 ] = 0.844\n", + " Average positive_micro_F1 @[ IoU=0.75 ] = 0.644\n", + " Average positive_micro_precision @[ IoU=0.75 ] = 0.801\n", + "\n", + "{'cgF1_eval_segm_cgF1': 0.42532135099113527, 'cgF1_eval_segm_precision': 0.6770954780038412, 'cgF1_eval_segm_recall': 0.5089505000184188, 'cgF1_eval_segm_F1': 0.5810550953744452, 'cgF1_eval_segm_positive_macro_F1': 0.8110173727833955, 'cgF1_eval_segm_positive_micro_F1': 0.6084924575531054, 'cgF1_eval_segm_positive_micro_precision': 0.7565875096115964, 'cgF1_eval_segm_IL_precision': 0.8222698066935975, 'cgF1_eval_segm_IL_recall': 0.6066350707705976, 'cgF1_eval_segm_IL_F1': 0.6981813291457964, 'cgF1_eval_segm_IL_FPR': 0.003917989709314776, 'cgF1_eval_segm_IL_MCC': 0.6989755513181787, 'cgF1_eval_segm_cgF1@0.5': 0.4817908882049053, 'cgF1_eval_segm_precision@0.5': 0.766985739738834, 'cgF1_eval_segm_recall@0.5': 0.5765180664001743, 'cgF1_eval_segm_F1@0.5': 0.6582016716689654, 'cgF1_eval_segm_positive_macro_F1@0.5': 0.9051793905267748, 'cgF1_eval_segm_positive_micro_F1@0.5': 0.6892814595536413, 'cgF1_eval_segm_positive_micro_precision@0.5': 0.8570310238186531, 'cgF1_eval_segm_cgF1@0.75': 0.4501092776435151, 
'cgF1_eval_segm_precision@0.75': 0.7165538006875134, 'cgF1_eval_segm_recall@0.75': 0.5386100291574232, 'cgF1_eval_segm_F1@0.75': 0.6149194365440128, 'cgF1_eval_segm_positive_macro_F1@0.75': 0.8441369689666682, 'cgF1_eval_segm_positive_micro_F1@0.75': 0.643955681703983, 'cgF1_eval_segm_positive_micro_precision@0.75': 0.8006782989648239}\n", + "loading annotations into memory...\n", + "Done (t=0.18s)\n", + "creating index...\n", + "index created!\n", + "loading annotations into memory...\n", + "Done (t=0.19s)\n", + "creating index...\n", + "index created!\n", + "loading annotations into memory...\n", + "Done (t=0.18s)\n", + "creating index...\n", + "index created!\n", + "Loaded 8045 predictions\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 65452/65452 [00:10<00:00, 6310.26it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Accumulating results\n", + "cgF1 metric, IoU type=bbox\n", + " Average cgF1 @[ IoU=0.50:0.95] = 0.443\n", + " Average precision @[ IoU=0.50:0.95] = 0.706\n", + " Average recall @[ IoU=0.50:0.95] = 0.529\n", + " Average F1 @[ IoU=0.50:0.95] = 0.605\n", + " Average positive_macro_F1 @[ IoU=0.50:0.95] = 0.865\n", + " Average positive_micro_F1 @[ IoU=0.50:0.95] = 0.633\n", + " Average positive_micro_precision @[ IoU=0.50:0.95] = 0.788\n", + " Average IL_precision = 0.822\n", + " Average IL_recall = 0.607\n", + " Average IL_F1 = 0.698\n", + " Average IL_FPR = 0.004\n", + " Average IL_MCC = 0.699\n", + " Average cgF1 @[ IoU=0.50 ] = 0.483\n", + " Average precision @[ IoU=0.50 ] = 0.770\n", + " Average recall @[ IoU=0.50 ] = 0.578\n", + " Average F1 @[ IoU=0.50 ] = 
0.660\n", + " Average positive_macro_F1 @[ IoU=0.50 ] = 0.909\n", + " Average positive_micro_F1 @[ IoU=0.50 ] = 0.691\n", + " Average positive_micro_precision @[ IoU=0.50 ] = 0.860\n", + " Average cgF1 @[ IoU=0.75 ] = 0.459\n", + " Average precision @[ IoU=0.75 ] = 0.732\n", + " Average recall @[ IoU=0.75 ] = 0.549\n", + " Average F1 @[ IoU=0.75 ] = 0.628\n", + " Average positive_macro_F1 @[ IoU=0.75 ] = 0.875\n", + " Average positive_micro_F1 @[ IoU=0.75 ] = 0.657\n", + " Average positive_micro_precision @[ IoU=0.75 ] = 0.818\n", + "\n", + "{'cgF1_eval_bbox_cgF1': 0.4427363803296873, 'cgF1_eval_bbox_precision': 0.7055568361999337, 'cgF1_eval_bbox_recall': 0.5294148470670139, 'cgF1_eval_bbox_F1': 0.6048754410558315, 'cgF1_eval_bbox_positive_macro_F1': 0.8646998403591537, 'cgF1_eval_bbox_positive_micro_F1': 0.6334075340614461, 'cgF1_eval_bbox_positive_micro_precision': 0.7883902742397528, 'cgF1_eval_bbox_IL_precision': 0.8222698066935975, 'cgF1_eval_bbox_IL_recall': 0.6066350707705976, 'cgF1_eval_bbox_IL_F1': 0.6981813291457964, 'cgF1_eval_bbox_IL_FPR': 0.003917989709314776, 'cgF1_eval_bbox_IL_MCC': 0.6989755513181787, 'cgF1_eval_bbox_cgF1@0.5': 0.4830441402948427, 'cgF1_eval_bbox_precision@0.5': 0.769787514130574, 'cgF1_eval_bbox_recall@0.5': 0.5776103612892368, 'cgF1_eval_bbox_F1@0.5': 0.6599450021609545, 'cgF1_eval_bbox_positive_macro_F1@0.5': 0.9093257078072422, 'cgF1_eval_bbox_positive_micro_F1@0.5': 0.6910744437110613, 'cgF1_eval_bbox_positive_micro_precision@0.5': 0.8601617307549769, 'cgF1_eval_bbox_cgF1@0.75': 0.4594543570790383, 'cgF1_eval_bbox_precision@0.75': 0.7321970410413953, 'cgF1_eval_bbox_recall@0.75': 0.5494043351540936, 'cgF1_eval_bbox_F1@0.75': 0.6277159968872291, 'cgF1_eval_bbox_positive_macro_F1@0.75': 0.8750107505715273, 'cgF1_eval_bbox_positive_micro_F1@0.75': 0.6573253616847772, 'cgF1_eval_bbox_positive_micro_precision@0.75': 0.8181580793592987}\n" + ] + } + ], + "source": [ + "results_gold = {}\n", + "results_gold_bbox = {}\n", + "\n", + 
"for subset_name, gts in saco_gold_gts.items():\n", + " print(\"Processing subset: \", subset_name)\n", + " gt_paths = [os.path.join(GT_DIR, gt) for gt in gts]\n", + " pred_path = os.path.join(PRED_DIR, f\"gold_{subset_name}/dumps/gold_{subset_name}/coco_predictions_segm.json\")\n", + " \n", + " evaluator = CGF1Evaluator(gt_path=gt_paths, verbose=True, iou_type=\"segm\") \n", + " summary = evaluator.evaluate(pred_path)\n", + " print(summary)\n", + "\n", + " cur_results = {}\n", + " cur_results[\"cgf1\"] = summary[\"cgF1_eval_segm_cgF1\"] * 100\n", + " cur_results[\"il_mcc\"] = summary[\"cgF1_eval_segm_IL_MCC\"]\n", + " cur_results[\"pmf1\"] = summary[\"cgF1_eval_segm_positive_micro_F1\"] * 100\n", + " results_gold[subset_name] = cur_results\n", + "\n", + " # Also eval bbox \n", + " evaluator = CGF1Evaluator(gt_path=gt_paths, verbose=True, iou_type=\"bbox\") \n", + " summary = evaluator.evaluate(pred_path)\n", + " print(summary)\n", + "\n", + " cur_results = {}\n", + " cur_results[\"cgf1\"] = summary[\"cgF1_eval_bbox_cgF1\"] * 100\n", + " cur_results[\"il_mcc\"] = summary[\"cgF1_eval_bbox_IL_MCC\"]\n", + " cur_results[\"pmf1\"] = summary[\"cgF1_eval_bbox_positive_micro_F1\"] * 100\n", + " results_gold_bbox[subset_name] = cur_results" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "c808a7cf-ecda-445a-a827-53a16dee4504", + "metadata": {}, + "outputs": [], + "source": [ + "# Compute averages\n", + "METRICS = [\"cgf1\", \"il_mcc\", \"pmf1\"]\n", + "avg_stats, avg_stats_bbox = {}, {}\n", + "for key in METRICS:\n", + " avg_stats[key] = sum(res[key] for res in results_gold.values()) / len(results_gold)\n", + " avg_stats_bbox[key] = sum(res[key] for res in results_gold_bbox.values()) / len(results_gold_bbox)\n", + "results_gold[\"Average\"] = avg_stats\n", + "results_gold_bbox[\"Average\"] = avg_stats_bbox" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "c26b1fb2", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
metaclip_npssa1b_npscrowdedfg_foodfg_sports_equipmentattributeswiki_commonAverage
cgf1il_mccpmf1cgf1il_mccpmf1cgf1il_mccpmf1cgf1il_mccpmf1cgf1il_mccpmf1cgf1il_mccpmf1cgf1il_mccpmf1cgf1il_mccpmf1
47.260.8158.5853.690.8662.5561.080.967.7353.410.7967.2865.520.8973.7554.930.7672.042.530.760.8554.060.8266.11
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Pretty print segmentation results\n", + "from IPython.display import HTML, display\n", + "\n", + "row1, row2, row3 = \"\", \"\", \"\"\n", + "for subset in results_gold:\n", + " row1 += f'{subset}'\n", + " row2 += \"\" + \"\".join(METRICS) + \"\"\n", + " row3 += \"\" + \"\".join([str(round(results_gold[subset][k], 2)) for k in METRICS]) + \"\"\n", + "\n", + "display(HTML(\n", + " f\"{row1}{row2}{row3}
\"\n", + "))" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "d3c85735-4aea-436d-a233-6f18cff29147", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
metaclip_npssa1b_npscrowdedfg_foodfg_sports_equipmentattributeswiki_commonAverage
cgf1il_mccpmf1cgf1il_mccpmf1cgf1il_mccpmf1cgf1il_mccpmf1cgf1il_mccpmf1cgf1il_mccpmf1cgf1il_mccpmf1cgf1il_mccpmf1
50.030.8161.9955.360.8664.4961.710.968.4353.790.867.6468.070.8976.6256.460.7673.9444.270.763.3455.670.8268.06
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Pretty print bbox detection results\n", + "from IPython.display import HTML, display\n", + "\n", + "row1, row2, row3 = \"\", \"\", \"\"\n", + "for subset in results_gold:\n", + " row1 += f'{subset}'\n", + " row2 += \"\" + \"\".join(METRICS) + \"\"\n", + " row3 += \"\" + \"\".join([str(round(results_gold_bbox[subset][k], 2)) for k in METRICS]) + \"\"\n", + "\n", + "display(HTML(\n", + " f\"{row1}{row2}{row3}
\"\n", + "))" + ] + }, + { + "cell_type": "markdown", + "id": "ef79428e-9212-4e26-8122-3e82841447de", + "metadata": {}, + "source": [ + "# SA-Co/Silver" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "55995b3b-1184-4d1f-b9bb-ad412eb734a3", + "metadata": {}, + "outputs": [], + "source": [ + "# Update to the directory where the GT annotation and PRED files exist\n", + "GT_DIR = # PUT YOUR PATH HERE\n", + "PRED_DIR = # PUT YOUR PATH HERE" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "cd254a73-7272-4df1-90d1-b818f5a7c6f7", + "metadata": {}, + "outputs": [], + "source": [ + "saco_silver_gts = {\n", + " \"bdd100k\": \"silver_bdd100k_merged_test.json\",\n", + " \"droid\": \"silver_droid_merged_test.json\",\n", + " \"ego4d\": \"silver_ego4d_merged_test.json\",\n", + " \"food_rec\": \"silver_food_rec_merged_test.json\",\n", + " \"geode\": \"silver_geode_merged_test.json\",\n", + " \"inaturalist\": \"silver_inaturalist_merged_test.json\",\n", + " \"nga_art\": \"silver_nga_art_merged_test.json\",\n", + " \"sav\": \"silver_sav_merged_test.json\",\n", + " \"yt1b\": \"silver_yt1b_merged_test.json\",\n", + " \"fathomnet\": \"silver_fathomnet_test.json\",\n", + "}" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "c736eabf-6e52-4f52-b4ab-111eaa490584", + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Processing subset: bdd100k\n", + "loading annotations into memory...\n", + "Done (t=0.12s)\n", + "creating index...\n", + "index created!\n", + "Loaded 31278 predictions\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + 
"100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 5439/5439 [00:01<00:00, 3496.20it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Accumulating results\n", + "cgF1 metric, IoU type=segm\n", + " Average cgF1 @[ IoU=0.50:0.95] = 0.466\n", + " Average precision @[ IoU=0.50:0.95] = 0.514\n", + " Average recall @[ IoU=0.50:0.95] = 0.644\n", + " Average F1 @[ IoU=0.50:0.95] = 0.572\n", + " Average positive_macro_F1 @[ IoU=0.50:0.95] = 0.669\n", + " Average positive_micro_F1 @[ IoU=0.50:0.95] = 0.601\n", + " Average positive_micro_precision @[ IoU=0.50:0.95] = 0.564\n", + " Average IL_precision = 0.870\n", + " Average IL_recall = 0.952\n", + " Average IL_F1 = 0.909\n", + " Average IL_FPR = 0.196\n", + " Average IL_MCC = 0.775\n", + " Average cgF1 @[ IoU=0.50 ] = 0.563\n", + " Average precision @[ IoU=0.50 ] = 0.621\n", + " Average recall @[ IoU=0.50 ] = 0.779\n", + " Average F1 @[ IoU=0.50 ] = 0.691\n", + " Average positive_macro_F1 @[ IoU=0.50 ] = 0.769\n", + " Average positive_micro_F1 @[ IoU=0.50 ] = 0.726\n", + " Average positive_micro_precision @[ IoU=0.50 ] = 0.681\n", + " Average cgF1 @[ IoU=0.75 ] = 0.507\n", + " Average precision @[ IoU=0.75 ] = 0.560\n", + " Average recall @[ IoU=0.75 ] = 0.701\n", + " Average F1 @[ IoU=0.75 ] = 0.623\n", + " Average positive_macro_F1 @[ IoU=0.75 ] = 0.708\n", + " Average positive_micro_F1 @[ IoU=0.75 ] = 0.655\n", + " Average positive_micro_precision @[ IoU=0.75 ] = 0.614\n", + "\n", + "{'cgF1_eval_segm_cgF1': 0.4660589364507352, 'cgF1_eval_segm_precision': 0.5141618280038143, 'cgF1_eval_segm_recall': 0.6443148170650035, 'cgF1_eval_segm_F1': 0.5718777188484898, 'cgF1_eval_segm_positive_macro_F1': 
0.6686547809892744, 'cgF1_eval_segm_positive_micro_F1': 0.6012626762403565, 'cgF1_eval_segm_positive_micro_precision': 0.5636910131863614, 'cgF1_eval_segm_IL_precision': 0.8697795821143331, 'cgF1_eval_segm_IL_recall': 0.9523658301199219, 'cgF1_eval_segm_IL_F1': 0.9092006527905545, 'cgF1_eval_segm_IL_FPR': 0.19606986891001316, 'cgF1_eval_segm_IL_MCC': 0.7751336560003381, 'cgF1_eval_segm_cgF1@0.5': 0.5631327201709976, 'cgF1_eval_segm_precision@0.5': 0.6212459311276801, 'cgF1_eval_segm_recall@0.5': 0.7785057868277604, 'cgF1_eval_segm_F1@0.5': 0.6909925475999513, 'cgF1_eval_segm_positive_macro_F1@0.5': 0.7694734183454693, 'cgF1_eval_segm_positive_micro_F1@0.5': 0.7264975734336478, 'cgF1_eval_segm_positive_micro_precision@0.5': 0.6810905230262809, 'cgF1_eval_segm_cgF1@0.75': 0.5073531550281276, 'cgF1_eval_segm_precision@0.75': 0.559714336799628, 'cgF1_eval_segm_recall@0.75': 0.7013983164091225, 'cgF1_eval_segm_F1@0.75': 0.6225479779361666, 'cgF1_eval_segm_positive_macro_F1@0.75': 0.7080380721802793, 'cgF1_eval_segm_positive_micro_F1@0.75': 0.6545363513771957, 'cgF1_eval_segm_positive_micro_precision@0.75': 0.6136315930539561}\n", + "loading annotations into memory...\n", + "Done (t=0.09s)\n", + "creating index...\n", + "index created!\n", + "Loaded 31278 predictions\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 5439/5439 [00:01<00:00, 4945.36it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Accumulating results\n", + "cgF1 metric, IoU type=bbox\n", + " Average cgF1 @[ IoU=0.50:0.95] = 0.462\n", + " Average precision @[ IoU=0.50:0.95] = 0.510\n", + " Average recall @[ 
IoU=0.50:0.95] = 0.639\n", + " Average F1 @[ IoU=0.50:0.95] = 0.567\n", + " Average positive_macro_F1 @[ IoU=0.50:0.95] = 0.673\n", + " Average positive_micro_F1 @[ IoU=0.50:0.95] = 0.596\n", + " Average positive_micro_precision @[ IoU=0.50:0.95] = 0.559\n", + " Average IL_precision = 0.870\n", + " Average IL_recall = 0.952\n", + " Average IL_F1 = 0.909\n", + " Average IL_FPR = 0.196\n", + " Average IL_MCC = 0.775\n", + " Average cgF1 @[ IoU=0.50 ] = 0.562\n", + " Average precision @[ IoU=0.50 ] = 0.620\n", + " Average recall @[ IoU=0.50 ] = 0.777\n", + " Average F1 @[ IoU=0.50 ] = 0.689\n", + " Average positive_macro_F1 @[ IoU=0.50 ] = 0.769\n", + " Average positive_micro_F1 @[ IoU=0.50 ] = 0.725\n", + " Average positive_micro_precision @[ IoU=0.50 ] = 0.679\n", + " Average cgF1 @[ IoU=0.75 ] = 0.507\n", + " Average precision @[ IoU=0.75 ] = 0.559\n", + " Average recall @[ IoU=0.75 ] = 0.700\n", + " Average F1 @[ IoU=0.75 ] = 0.622\n", + " Average positive_macro_F1 @[ IoU=0.75 ] = 0.714\n", + " Average positive_micro_F1 @[ IoU=0.75 ] = 0.653\n", + " Average positive_micro_precision @[ IoU=0.75 ] = 0.613\n", + "\n", + "{'cgF1_eval_bbox_cgF1': 0.4621052346722842, 'cgF1_eval_bbox_precision': 0.5098004175871937, 'cgF1_eval_bbox_recall': 0.6388493756384388, 'cgF1_eval_bbox_F1': 0.5670263110638136, 'cgF1_eval_bbox_positive_macro_F1': 0.673487715877281, 'cgF1_eval_bbox_positive_micro_F1': 0.5961620052169204, 'cgF1_eval_bbox_positive_micro_precision': 0.5589094683054214, 'cgF1_eval_bbox_IL_precision': 0.8697795821143331, 'cgF1_eval_bbox_IL_recall': 0.9523658301199219, 'cgF1_eval_bbox_IL_F1': 0.9092006527905545, 'cgF1_eval_bbox_IL_FPR': 0.19606986891001316, 'cgF1_eval_bbox_IL_MCC': 0.7751336560003381, 'cgF1_eval_bbox_cgF1@0.5': 0.5618032590322063, 'cgF1_eval_bbox_precision@0.5': 0.6197793749934364, 'cgF1_eval_bbox_recall@0.5': 0.7766679921960209, 'cgF1_eval_bbox_F1@0.5': 0.6893612262495565, 'cgF1_eval_bbox_positive_macro_F1@0.5': 0.7694834511677656, 
'cgF1_eval_bbox_positive_micro_F1@0.5': 0.7247824354977579, 'cgF1_eval_bbox_positive_micro_precision@0.5': 0.6794826936072514, 'cgF1_eval_bbox_cgF1@0.75': 0.5065439178140905, 'cgF1_eval_bbox_precision@0.75': 0.5588216504570449, 'cgF1_eval_bbox_recall@0.75': 0.7002796588071941, 'cgF1_eval_bbox_F1@0.75': 0.621554999724056, 'cgF1_eval_bbox_positive_macro_F1@0.75': 0.7144152159611694, 'cgF1_eval_bbox_positive_micro_F1@0.75': 0.6534923543738753, 'cgF1_eval_bbox_positive_micro_precision@0.75': 0.6126529142771555}\n", + "Processing subset: droid\n", + "loading annotations into memory...\n", + "Done (t=0.15s)\n", + "creating index...\n", + "index created!\n", + "Loaded 27006 predictions\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 9415/9415 [00:02<00:00, 4431.41it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Accumulating results\n", + "cgF1 metric, IoU type=segm\n", + " Average cgF1 @[ IoU=0.50:0.95] = 0.456\n", + " Average precision @[ IoU=0.50:0.95] = 0.501\n", + " Average recall @[ IoU=0.50:0.95] = 0.651\n", + " Average F1 @[ IoU=0.50:0.95] = 0.566\n", + " Average positive_macro_F1 @[ IoU=0.50:0.95] = 0.717\n", + " Average positive_micro_F1 @[ IoU=0.50:0.95] = 0.603\n", + " Average positive_micro_precision @[ IoU=0.50:0.95] = 0.562\n", + " Average IL_precision = 0.869\n", + " Average IL_recall = 0.881\n", + " Average IL_F1 = 0.875\n", + " Average IL_FPR = 0.125\n", + " Average IL_MCC = 0.755\n", + " Average cgF1 @[ IoU=0.50 ] = 0.517\n", + " Average precision @[ IoU=0.50 ] = 0.568\n", + " Average recall @[ IoU=0.50 ] = 0.739\n", + " Average F1 @[ IoU=0.50 ] = 0.642\n", + " 
Average positive_macro_F1 @[ IoU=0.50 ] = 0.782\n", + " Average positive_micro_F1 @[ IoU=0.50 ] = 0.685\n", + " Average positive_micro_precision @[ IoU=0.50 ] = 0.638\n", + " Average cgF1 @[ IoU=0.75 ] = 0.481\n", + " Average precision @[ IoU=0.75 ] = 0.528\n", + " Average recall @[ IoU=0.75 ] = 0.687\n", + " Average F1 @[ IoU=0.75 ] = 0.597\n", + " Average positive_macro_F1 @[ IoU=0.75 ] = 0.740\n", + " Average positive_micro_F1 @[ IoU=0.75 ] = 0.636\n", + " Average positive_micro_precision @[ IoU=0.75 ] = 0.593\n", + "\n", + "{'cgF1_eval_segm_cgF1': 0.4557789681577404, 'cgF1_eval_segm_precision': 0.5005361319569939, 'cgF1_eval_segm_recall': 0.651409427375482, 'cgF1_eval_segm_F1': 0.5660435085175799, 'cgF1_eval_segm_positive_macro_F1': 0.7165954275000161, 'cgF1_eval_segm_positive_micro_F1': 0.6034929143710873, 'cgF1_eval_segm_positive_micro_precision': 0.5622289982156015, 'cgF1_eval_segm_IL_precision': 0.8689550948037661, 'cgF1_eval_segm_IL_recall': 0.8807439823018065, 'cgF1_eval_segm_IL_F1': 0.874809323784164, 'cgF1_eval_segm_IL_FPR': 0.12528379770375977, 'cgF1_eval_segm_IL_MCC': 0.7552349949836895, 'cgF1_eval_segm_cgF1@0.5': 0.516971131551212, 'cgF1_eval_segm_precision@0.5': 0.5677317852045766, 'cgF1_eval_segm_recall@0.5': 0.7388594219103226, 'cgF1_eval_segm_F1@0.5': 0.6420399499626908, 'cgF1_eval_segm_positive_macro_F1@0.5': 0.7817224981491459, 'cgF1_eval_segm_positive_micro_F1@0.5': 0.6845169185551006, 'cgF1_eval_segm_positive_micro_precision@0.5': 0.637706755759541, 'cgF1_eval_segm_cgF1@0.75': 0.4806464217139674, 'cgF1_eval_segm_precision@0.75': 0.5278433016810116, 'cgF1_eval_segm_recall@0.75': 0.686947616643896, 'cgF1_eval_segm_F1@0.75': 0.5969271686567055, 'cgF1_eval_segm_positive_macro_F1@0.75': 0.7403811012887065, 'cgF1_eval_segm_positive_micro_F1@0.75': 0.6364196904360181, 'cgF1_eval_segm_positive_micro_precision@0.75': 0.5929018741536706}\n", + "loading annotations into memory...\n", + "Done (t=0.12s)\n", + "creating index...\n", + "index created!\n", + 
"Loaded 27006 predictions\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 9415/9415 [00:01<00:00, 5301.02it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Accumulating results\n", + "cgF1 metric, IoU type=bbox\n", + " Average cgF1 @[ IoU=0.50:0.95] = 0.461\n", + " Average precision @[ IoU=0.50:0.95] = 0.506\n", + " Average recall @[ IoU=0.50:0.95] = 0.659\n", + " Average F1 @[ IoU=0.50:0.95] = 0.573\n", + " Average positive_macro_F1 @[ IoU=0.50:0.95] = 0.726\n", + " Average positive_micro_F1 @[ IoU=0.50:0.95] = 0.611\n", + " Average positive_micro_precision @[ IoU=0.50:0.95] = 0.569\n", + " Average IL_precision = 0.869\n", + " Average IL_recall = 0.881\n", + " Average IL_F1 = 0.875\n", + " Average IL_FPR = 0.125\n", + " Average IL_MCC = 0.755\n", + " Average cgF1 @[ IoU=0.50 ] = 0.516\n", + " Average precision @[ IoU=0.50 ] = 0.566\n", + " Average recall @[ IoU=0.50 ] = 0.737\n", + " Average F1 @[ IoU=0.50 ] = 0.641\n", + " Average positive_macro_F1 @[ IoU=0.50 ] = 0.778\n", + " Average positive_micro_F1 @[ IoU=0.50 ] = 0.683\n", + " Average positive_micro_precision @[ IoU=0.50 ] = 0.636\n", + " Average cgF1 @[ IoU=0.75 ] = 0.484\n", + " Average precision @[ IoU=0.75 ] = 0.532\n", + " Average recall @[ IoU=0.75 ] = 0.692\n", + " Average F1 @[ IoU=0.75 ] = 0.601\n", + " Average positive_macro_F1 @[ IoU=0.75 ] = 0.743\n", + " Average positive_micro_F1 @[ IoU=0.75 ] = 0.641\n", + " Average positive_micro_precision @[ IoU=0.75 ] = 0.597\n", + "\n", + "{'cgF1_eval_bbox_cgF1': 0.46120814518784153, 'cgF1_eval_bbox_precision': 0.5064979590642793, 'cgF1_eval_bbox_recall': 
0.6591682885927223, 'cgF1_eval_bbox_F1': 0.5727861714864271, 'cgF1_eval_bbox_positive_macro_F1': 0.7257891003555039, 'cgF1_eval_bbox_positive_micro_F1': 0.6106816398223206, 'cgF1_eval_bbox_positive_micro_precision': 0.5689256418104573, 'cgF1_eval_bbox_IL_precision': 0.8689550948037661, 'cgF1_eval_bbox_IL_recall': 0.8807439823018065, 'cgF1_eval_bbox_IL_F1': 0.874809323784164, 'cgF1_eval_bbox_IL_FPR': 0.12528379770375977, 'cgF1_eval_bbox_IL_MCC': 0.7552349949836895, 'cgF1_eval_bbox_cgF1@0.5': 0.5157342686709716, 'cgF1_eval_bbox_precision@0.5': 0.5663735751921255, 'cgF1_eval_bbox_recall@0.5': 0.7370918156378099, 'cgF1_eval_bbox_F1@0.5': 0.6405038516738359, 'cgF1_eval_bbox_positive_macro_F1@0.5': 0.7784650302224031, 'cgF1_eval_bbox_positive_micro_F1@0.5': 0.6828791993174386, 'cgF1_eval_bbox_positive_micro_precision@0.5': 0.6361811415113126, 'cgF1_eval_bbox_cgF1@0.75': 0.48416171621299414, 'cgF1_eval_bbox_precision@0.75': 0.5317034775058727, 'cgF1_eval_bbox_recall@0.75': 0.6919713397341954, 'cgF1_eval_bbox_F1@0.75': 0.6012929216844536, 'cgF1_eval_bbox_positive_macro_F1@0.75': 0.743302410171559, 'cgF1_eval_bbox_positive_micro_F1@0.75': 0.6410742608973653, 'cgF1_eval_bbox_positive_micro_precision@0.75': 0.5972378304381096}\n", + "Processing subset: ego4d\n", + "loading annotations into memory...\n", + "Done (t=0.36s)\n", + "creating index...\n", + "index created!\n", + "Loaded 54328 predictions\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 12428/12428 [00:04<00:00, 2599.59it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Accumulating results\n", + "cgF1 metric, IoU type=segm\n", 
+ " Average cgF1 @[ IoU=0.50:0.95] = 0.386\n", + " Average precision @[ IoU=0.50:0.95] = 0.521\n", + " Average recall @[ IoU=0.50:0.95] = 0.689\n", + " Average F1 @[ IoU=0.50:0.95] = 0.594\n", + " Average positive_macro_F1 @[ IoU=0.50:0.95] = 0.765\n", + " Average positive_micro_F1 @[ IoU=0.50:0.95] = 0.626\n", + " Average positive_micro_precision @[ IoU=0.50:0.95] = 0.573\n", + " Average IL_precision = 0.901\n", + " Average IL_recall = 0.912\n", + " Average IL_F1 = 0.907\n", + " Average IL_FPR = 0.303\n", + " Average IL_MCC = 0.618\n", + " Average cgF1 @[ IoU=0.50 ] = 0.438\n", + " Average precision @[ IoU=0.50 ] = 0.591\n", + " Average recall @[ IoU=0.50 ] = 0.782\n", + " Average F1 @[ IoU=0.50 ] = 0.673\n", + " Average positive_macro_F1 @[ IoU=0.50 ] = 0.842\n", + " Average positive_micro_F1 @[ IoU=0.50 ] = 0.709\n", + " Average positive_micro_precision @[ IoU=0.50 ] = 0.649\n", + " Average cgF1 @[ IoU=0.75 ] = 0.404\n", + " Average precision @[ IoU=0.75 ] = 0.545\n", + " Average recall @[ IoU=0.75 ] = 0.721\n", + " Average F1 @[ IoU=0.75 ] = 0.621\n", + " Average positive_macro_F1 @[ IoU=0.75 ] = 0.795\n", + " Average positive_micro_F1 @[ IoU=0.75 ] = 0.655\n", + " Average positive_micro_precision @[ IoU=0.75 ] = 0.599\n", + "\n", + "{'cgF1_eval_segm_cgF1': 0.38643115983316056, 'cgF1_eval_segm_precision': 0.5213111360174665, 'cgF1_eval_segm_recall': 0.6893477933583816, 'cgF1_eval_segm_F1': 0.5936188825546556, 'cgF1_eval_segm_positive_macro_F1': 0.7647892994501866, 'cgF1_eval_segm_positive_micro_F1': 0.6256311694898511, 'cgF1_eval_segm_positive_micro_precision': 0.5727797240533252, 'cgF1_eval_segm_IL_precision': 0.9011314369354836, 'cgF1_eval_segm_IL_recall': 0.9124197001164432, 'cgF1_eval_segm_IL_F1': 0.9067399372269952, 'cgF1_eval_segm_IL_FPR': 0.30278497399521215, 'cgF1_eval_segm_IL_MCC': 0.6176660925450091, 'cgF1_eval_segm_cgF1@0.5': 0.4381751660817137, 'cgF1_eval_segm_precision@0.5': 0.5911103463493724, 'cgF1_eval_segm_recall@0.5': 0.7816457097006957, 
'cgF1_eval_segm_F1@0.5': 0.6731060611161825, 'cgF1_eval_segm_positive_macro_F1@0.5': 0.8419527726712451, 'cgF1_eval_segm_positive_micro_F1@0.5': 0.7094045980025753, 'cgF1_eval_segm_positive_micro_precision@0.5': 0.6494701487744837, 'cgF1_eval_segm_cgF1@0.75': 0.4042830454832554, 'cgF1_eval_segm_precision@0.75': 0.545392138165807, 'cgF1_eval_segm_recall@0.75': 0.7211909375882043, 'cgF1_eval_segm_F1@0.75': 0.6210422717504098, 'cgF1_eval_segm_positive_macro_F1@0.75': 0.7947833780726534, 'cgF1_eval_segm_positive_micro_F1@0.75': 0.6545333317836213, 'cgF1_eval_segm_positive_micro_precision@0.75': 0.5992382222753772}\n", + "loading annotations into memory...\n", + "Done (t=0.31s)\n", + "creating index...\n", + "index created!\n", + "Loaded 54328 predictions\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 12428/12428 [00:03<00:00, 4068.74it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Accumulating results\n", + "cgF1 metric, IoU type=bbox\n", + " Average cgF1 @[ IoU=0.50:0.95] = 0.388\n", + " Average precision @[ IoU=0.50:0.95] = 0.523\n", + " Average recall @[ IoU=0.50:0.95] = 0.692\n", + " Average F1 @[ IoU=0.50:0.95] = 0.596\n", + " Average positive_macro_F1 @[ IoU=0.50:0.95] = 0.778\n", + " Average positive_micro_F1 @[ IoU=0.50:0.95] = 0.628\n", + " Average positive_micro_precision @[ IoU=0.50:0.95] = 0.575\n", + " Average IL_precision = 0.901\n", + " Average IL_recall = 0.912\n", + " Average IL_F1 = 0.907\n", + " Average IL_FPR = 0.303\n", + " Average IL_MCC = 0.618\n", + " Average cgF1 @[ IoU=0.50 ] = 0.437\n", + " Average precision @[ IoU=0.50 ] = 0.589\n", + " Average 
recall @[ IoU=0.50 ] = 0.779\n", + " Average F1 @[ IoU=0.50 ] = 0.671\n", + " Average positive_macro_F1 @[ IoU=0.50 ] = 0.840\n", + " Average positive_micro_F1 @[ IoU=0.50 ] = 0.707\n", + " Average positive_micro_precision @[ IoU=0.50 ] = 0.647\n", + " Average cgF1 @[ IoU=0.75 ] = 0.404\n", + " Average precision @[ IoU=0.75 ] = 0.545\n", + " Average recall @[ IoU=0.75 ] = 0.721\n", + " Average F1 @[ IoU=0.75 ] = 0.621\n", + " Average positive_macro_F1 @[ IoU=0.75 ] = 0.796\n", + " Average positive_micro_F1 @[ IoU=0.75 ] = 0.654\n", + " Average positive_micro_precision @[ IoU=0.75 ] = 0.599\n", + "\n", + "{'cgF1_eval_bbox_cgF1': 0.38789930948498, 'cgF1_eval_bbox_precision': 0.5232915719125053, 'cgF1_eval_bbox_recall': 0.6919665924206118, 'cgF1_eval_bbox_F1': 0.5958741983272995, 'cgF1_eval_bbox_positive_macro_F1': 0.7777412549940348, 'cgF1_eval_bbox_positive_micro_F1': 0.6280081004393389, 'cgF1_eval_bbox_positive_micro_precision': 0.5749556866351561, 'cgF1_eval_bbox_IL_precision': 0.9011314369354836, 'cgF1_eval_bbox_IL_recall': 0.9124197001164432, 'cgF1_eval_bbox_IL_F1': 0.9067399372269952, 'cgF1_eval_bbox_IL_FPR': 0.30278497399521215, 'cgF1_eval_bbox_IL_MCC': 0.6176660925450091, 'cgF1_eval_bbox_cgF1@0.5': 0.43664849398208544, 'cgF1_eval_bbox_precision@0.5': 0.589050967602365, 'cgF1_eval_bbox_recall@0.5': 0.7789225217677006, 'cgF1_eval_bbox_F1@0.5': 0.6707608453779862, 'cgF1_eval_bbox_positive_macro_F1@0.5': 0.8401608683011329, 'cgF1_eval_bbox_positive_micro_F1@0.5': 0.7069329193430948, 'cgF1_eval_bbox_positive_micro_precision@0.5': 0.6472074493826321, 'cgF1_eval_bbox_cgF1@0.75': 0.4039522665286139, 'cgF1_eval_bbox_precision@0.75': 0.5449459394372886, 'cgF1_eval_bbox_recall@0.75': 0.7206009135360554, 'cgF1_eval_bbox_F1@0.75': 0.6205341416742646, 'cgF1_eval_bbox_positive_macro_F1@0.75': 0.7957011848300353, 'cgF1_eval_bbox_positive_micro_F1@0.75': 0.6539978014078505, 'cgF1_eval_bbox_positive_micro_precision@0.75': 0.598747970740476}\n", + "Processing subset: 
food_rec\n", + "loading annotations into memory...\n", + "Done (t=0.31s)\n", + "creating index...\n", + "index created!\n", + "Loaded 54984 predictions\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 20888/20888 [00:04<00:00, 4826.96it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Accumulating results\n", + "cgF1 metric, IoU type=segm\n", + " Average cgF1 @[ IoU=0.50:0.95] = 0.530\n", + " Average precision @[ IoU=0.50:0.95] = 0.598\n", + " Average recall @[ IoU=0.50:0.95] = 0.674\n", + " Average F1 @[ IoU=0.50:0.95] = 0.634\n", + " Average positive_macro_F1 @[ IoU=0.50:0.95] = 0.839\n", + " Average positive_micro_F1 @[ IoU=0.50:0.95] = 0.672\n", + " Average positive_micro_precision @[ IoU=0.50:0.95] = 0.670\n", + " Average IL_precision = 0.863\n", + " Average IL_recall = 0.903\n", + " Average IL_F1 = 0.883\n", + " Average IL_FPR = 0.112\n", + " Average IL_MCC = 0.788\n", + " Average cgF1 @[ IoU=0.50 ] = 0.576\n", + " Average precision @[ IoU=0.50 ] = 0.650\n", + " Average recall @[ IoU=0.50 ] = 0.733\n", + " Average F1 @[ IoU=0.50 ] = 0.689\n", + " Average positive_macro_F1 @[ IoU=0.50 ] = 0.883\n", + " Average positive_micro_F1 @[ IoU=0.50 ] = 0.731\n", + " Average positive_micro_precision @[ IoU=0.50 ] = 0.729\n", + " Average cgF1 @[ IoU=0.75 ] = 0.549\n", + " Average precision @[ IoU=0.75 ] = 0.620\n", + " Average recall @[ IoU=0.75 ] = 0.699\n", + " Average F1 @[ IoU=0.75 ] = 0.657\n", + " Average positive_macro_F1 @[ IoU=0.75 ] = 0.855\n", + " Average positive_micro_F1 @[ IoU=0.75 ] = 0.697\n", + " Average positive_micro_precision @[ IoU=0.75 ] = 0.695\n", + "\n", + 
"{'cgF1_eval_segm_cgF1': 0.5296031530873976, 'cgF1_eval_segm_precision': 0.5979719064128862, 'cgF1_eval_segm_recall': 0.67394967253306, 'cgF1_eval_segm_F1': 0.6336417199495971, 'cgF1_eval_segm_positive_macro_F1': 0.8386411473003422, 'cgF1_eval_segm_positive_micro_F1': 0.6721473109964069, 'cgF1_eval_segm_positive_micro_precision': 0.6704540304049686, 'cgF1_eval_segm_IL_precision': 0.8631095331666377, 'cgF1_eval_segm_IL_recall': 0.9029932269059434, 'cgF1_eval_segm_IL_F1': 0.882600535877229, 'cgF1_eval_segm_IL_FPR': 0.11172660643329413, 'cgF1_eval_segm_IL_MCC': 0.7879272064665432, 'cgF1_eval_segm_cgF1@0.5': 0.5756816178619664, 'cgF1_eval_segm_precision@0.5': 0.6499949694948377, 'cgF1_eval_segm_recall@0.5': 0.7325827386558004, 'cgF1_eval_segm_F1@0.5': 0.6887723611338412, 'cgF1_eval_segm_positive_macro_F1@0.5': 0.8833570881072849, 'cgF1_eval_segm_positive_micro_F1@0.5': 0.7306279223985785, 'cgF1_eval_segm_positive_micro_precision@0.5': 0.7287829785432497, 'cgF1_eval_segm_cgF1@0.75': 0.5489294451647743, 'cgF1_eval_segm_precision@0.75': 0.6197914899943298, 'cgF1_eval_segm_recall@0.75': 0.6985416325429136, 'cgF1_eval_segm_F1@0.75': 0.6567646881074378, 'cgF1_eval_segm_positive_macro_F1@0.75': 0.854577862666958, 'cgF1_eval_segm_positive_micro_F1@0.75': 0.6966753282025206, 'cgF1_eval_segm_positive_micro_precision@0.75': 0.6949184368378619}\n", + "loading annotations into memory...\n", + "Done (t=0.27s)\n", + "creating index...\n", + "index created!\n", + "Loaded 54984 predictions\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 20888/20888 [00:03<00:00, 6545.85it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": 
"stream", + "text": [ + "Accumulating results\n", + "cgF1 metric, IoU type=bbox\n", + " Average cgF1 @[ IoU=0.50:0.95] = 0.534\n", + " Average precision @[ IoU=0.50:0.95] = 0.602\n", + " Average recall @[ IoU=0.50:0.95] = 0.679\n", + " Average F1 @[ IoU=0.50:0.95] = 0.638\n", + " Average positive_macro_F1 @[ IoU=0.50:0.95] = 0.869\n", + " Average positive_micro_F1 @[ IoU=0.50:0.95] = 0.677\n", + " Average positive_micro_precision @[ IoU=0.50:0.95] = 0.675\n", + " Average IL_precision = 0.863\n", + " Average IL_recall = 0.903\n", + " Average IL_F1 = 0.883\n", + " Average IL_FPR = 0.112\n", + " Average IL_MCC = 0.788\n", + " Average cgF1 @[ IoU=0.50 ] = 0.577\n", + " Average precision @[ IoU=0.50 ] = 0.652\n", + " Average recall @[ IoU=0.50 ] = 0.735\n", + " Average F1 @[ IoU=0.50 ] = 0.691\n", + " Average positive_macro_F1 @[ IoU=0.50 ] = 0.897\n", + " Average positive_micro_F1 @[ IoU=0.50 ] = 0.733\n", + " Average positive_micro_precision @[ IoU=0.50 ] = 0.731\n", + " Average cgF1 @[ IoU=0.75 ] = 0.554\n", + " Average precision @[ IoU=0.75 ] = 0.626\n", + " Average recall @[ IoU=0.75 ] = 0.705\n", + " Average F1 @[ IoU=0.75 ] = 0.663\n", + " Average positive_macro_F1 @[ IoU=0.75 ] = 0.881\n", + " Average positive_micro_F1 @[ IoU=0.75 ] = 0.703\n", + " Average positive_micro_precision @[ IoU=0.75 ] = 0.701\n", + "\n", + "{'cgF1_eval_bbox_cgF1': 0.5335343269652235, 'cgF1_eval_bbox_precision': 0.602410242357245, 'cgF1_eval_bbox_recall': 0.6789519394024788, 'cgF1_eval_bbox_F1': 0.6383451782260778, 'cgF1_eval_bbox_positive_macro_F1': 0.8687478456564411, 'cgF1_eval_bbox_positive_micro_F1': 0.6771365712295384, 'cgF1_eval_bbox_positive_micro_precision': 0.675430351516837, 'cgF1_eval_bbox_IL_precision': 0.8631095331666377, 'cgF1_eval_bbox_IL_recall': 0.9029932269059434, 'cgF1_eval_bbox_IL_F1': 0.882600535877229, 'cgF1_eval_bbox_IL_FPR': 0.11172660643329413, 'cgF1_eval_bbox_IL_MCC': 0.7879272064665432, 'cgF1_eval_bbox_cgF1@0.5': 0.5771958917885865, 
'cgF1_eval_bbox_precision@0.5': 0.6517046004099607, 'cgF1_eval_bbox_recall@0.5': 0.734509593718794, 'cgF1_eval_bbox_F1@0.5': 0.6905841162112941, 'cgF1_eval_bbox_positive_macro_F1@0.5': 0.8970825996312395, 'cgF1_eval_bbox_positive_micro_F1@0.5': 0.7325497673535344, 'cgF1_eval_bbox_positive_micro_precision@0.5': 0.730699839394498, 'cgF1_eval_bbox_cgF1@0.75': 0.5540364082092076, 'cgF1_eval_bbox_precision@0.75': 0.6255573040610194, 'cgF1_eval_bbox_recall@0.75': 0.705040045696539, 'cgF1_eval_bbox_F1@0.75': 0.6628749209147204, 'cgF1_eval_bbox_positive_macro_F1@0.75': 0.8806948276411976, 'cgF1_eval_bbox_positive_micro_F1@0.75': 0.703156844518394, 'cgF1_eval_bbox_positive_micro_precision@0.75': 0.7013831440224643}\n", + "Processing subset: geode\n", + "loading annotations into memory...\n", + "Done (t=0.16s)\n", + "creating index...\n", + "index created!\n", + "Loaded 14206 predictions\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 14797/14797 [00:02<00:00, 5611.66it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Accumulating results\n", + "cgF1 metric, IoU type=segm\n", + " Average cgF1 @[ IoU=0.50:0.95] = 0.701\n", + " Average precision @[ IoU=0.50:0.95] = 0.672\n", + " Average recall @[ IoU=0.50:0.95] = 0.840\n", + " Average F1 @[ IoU=0.50:0.95] = 0.747\n", + " Average positive_macro_F1 @[ IoU=0.50:0.95] = 0.857\n", + " Average positive_micro_F1 @[ IoU=0.50:0.95] = 0.787\n", + " Average positive_micro_precision @[ IoU=0.50:0.95] = 0.741\n", + " Average IL_precision = 0.881\n", + " Average IL_recall = 0.975\n", + " Average IL_F1 = 0.925\n", + " Average IL_FPR = 0.062\n", + " 
Average IL_MCC = 0.890\n", + " Average cgF1 @[ IoU=0.50 ] = 0.745\n", + " Average precision @[ IoU=0.50 ] = 0.714\n", + " Average recall @[ IoU=0.50 ] = 0.893\n", + " Average F1 @[ IoU=0.50 ] = 0.793\n", + " Average positive_macro_F1 @[ IoU=0.50 ] = 0.899\n", + " Average positive_micro_F1 @[ IoU=0.50 ] = 0.837\n", + " Average positive_micro_precision @[ IoU=0.50 ] = 0.787\n", + " Average cgF1 @[ IoU=0.75 ] = 0.716\n", + " Average precision @[ IoU=0.75 ] = 0.687\n", + " Average recall @[ IoU=0.75 ] = 0.859\n", + " Average F1 @[ IoU=0.75 ] = 0.763\n", + " Average positive_macro_F1 @[ IoU=0.75 ] = 0.869\n", + " Average positive_micro_F1 @[ IoU=0.75 ] = 0.805\n", + " Average positive_micro_precision @[ IoU=0.75 ] = 0.758\n", + "\n", + "{'cgF1_eval_segm_cgF1': 0.7006886839682105, 'cgF1_eval_segm_precision': 0.6718811161345404, 'cgF1_eval_segm_recall': 0.8400571547562657, 'cgF1_eval_segm_F1': 0.7465664720739594, 'cgF1_eval_segm_positive_macro_F1': 0.8565758633142841, 'cgF1_eval_segm_positive_micro_F1': 0.7872827580159756, 'cgF1_eval_segm_positive_micro_precision': 0.740835425028983, 'cgF1_eval_segm_IL_precision': 0.8808211364986084, 'cgF1_eval_segm_IL_recall': 0.9747580982383764, 'cgF1_eval_segm_IL_F1': 0.9254113832735964, 'cgF1_eval_segm_IL_FPR': 0.06243154435303878, 'cgF1_eval_segm_IL_MCC': 0.8900089286014722, 'cgF1_eval_segm_cgF1@0.5': 0.7445249438888863, 'cgF1_eval_segm_precision@0.5': 0.7139124677344605, 'cgF1_eval_segm_recall@0.5': 0.8926092161071292, 'cgF1_eval_segm_F1@0.5': 0.7932730581226168, 'cgF1_eval_segm_positive_macro_F1@0.5': 0.8987084032239174, 'cgF1_eval_segm_positive_micro_F1@0.5': 0.8365364885258015, 'cgF1_eval_segm_positive_micro_precision@0.5': 0.7871804010661337, 'cgF1_eval_segm_cgF1@0.75': 0.7164815452728956, 'cgF1_eval_segm_precision@0.75': 0.6870237242867002, 'cgF1_eval_segm_recall@0.75': 0.8589900522799775, 'cgF1_eval_segm_F1@0.75': 0.7633934239343049, 'cgF1_eval_segm_positive_macro_F1@0.75': 0.8685617156358091, 
'cgF1_eval_segm_positive_micro_F1@0.75': 0.8050273679824188, 'cgF1_eval_segm_positive_micro_precision@0.75': 0.7575320998975862}\n", + "loading annotations into memory...\n", + "Done (t=0.11s)\n", + "creating index...\n", + "index created!\n", + "Loaded 14206 predictions\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 14797/14797 [00:01<00:00, 7792.81it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Accumulating results\n", + "cgF1 metric, IoU type=bbox\n", + " Average cgF1 @[ IoU=0.50:0.95] = 0.708\n", + " Average precision @[ IoU=0.50:0.95] = 0.679\n", + " Average recall @[ IoU=0.50:0.95] = 0.848\n", + " Average F1 @[ IoU=0.50:0.95] = 0.754\n", + " Average positive_macro_F1 @[ IoU=0.50:0.95] = 0.872\n", + " Average positive_micro_F1 @[ IoU=0.50:0.95] = 0.795\n", + " Average positive_micro_precision @[ IoU=0.50:0.95] = 0.748\n", + " Average IL_precision = 0.881\n", + " Average IL_recall = 0.975\n", + " Average IL_F1 = 0.925\n", + " Average IL_FPR = 0.062\n", + " Average IL_MCC = 0.890\n", + " Average cgF1 @[ IoU=0.50 ] = 0.744\n", + " Average precision @[ IoU=0.50 ] = 0.714\n", + " Average recall @[ IoU=0.50 ] = 0.892\n", + " Average F1 @[ IoU=0.50 ] = 0.793\n", + " Average positive_macro_F1 @[ IoU=0.50 ] = 0.901\n", + " Average positive_micro_F1 @[ IoU=0.50 ] = 0.836\n", + " Average positive_micro_precision @[ IoU=0.50 ] = 0.787\n", + " Average cgF1 @[ IoU=0.75 ] = 0.723\n", + " Average precision @[ IoU=0.75 ] = 0.693\n", + " Average recall @[ IoU=0.75 ] = 0.867\n", + " Average F1 @[ IoU=0.75 ] = 0.770\n", + " Average positive_macro_F1 @[ IoU=0.75 ] = 0.882\n", + " Average 
positive_micro_F1 @[ IoU=0.75 ] = 0.812\n", + " Average positive_micro_precision @[ IoU=0.75 ] = 0.764\n", + "\n", + "{'cgF1_eval_bbox_cgF1': 0.7076143815954025, 'cgF1_eval_bbox_precision': 0.6785216560143518, 'cgF1_eval_bbox_recall': 0.8483598632317971, 'cgF1_eval_bbox_F1': 0.7539456529790385, 'cgF1_eval_bbox_positive_macro_F1': 0.8719976344244793, 'cgF1_eval_bbox_positive_micro_F1': 0.7950643626770375, 'cgF1_eval_bbox_positive_micro_precision': 0.7481574751151425, 'cgF1_eval_bbox_IL_precision': 0.8808211364986084, 'cgF1_eval_bbox_IL_recall': 0.9747580982383764, 'cgF1_eval_bbox_IL_F1': 0.9254113832735964, 'cgF1_eval_bbox_IL_FPR': 0.06243154435303878, 'cgF1_eval_bbox_IL_MCC': 0.8900089286014722, 'cgF1_eval_bbox_cgF1@0.5': 0.7442978718352845, 'cgF1_eval_bbox_precision@0.5': 0.7136947451154503, 'cgF1_eval_bbox_recall@0.5': 0.8923369961571118, 'cgF1_eval_bbox_F1@0.5': 0.7930311177647804, 'cgF1_eval_bbox_positive_macro_F1@0.5': 0.9007468930624848, 'cgF1_eval_bbox_positive_micro_F1@0.5': 0.836281353946468, 'cgF1_eval_bbox_positive_micro_precision@0.5': 0.7869403338501941, 'cgF1_eval_bbox_cgF1@0.75': 0.7230666348257286, 'cgF1_eval_bbox_precision@0.75': 0.6933376802379961, 'cgF1_eval_bbox_recall@0.75': 0.8668844308304827, 'cgF1_eval_bbox_F1@0.75': 0.7704096943096695, 'cgF1_eval_bbox_positive_macro_F1@0.75': 0.8818653539703468, 'cgF1_eval_bbox_positive_micro_F1@0.75': 0.8124262707812712, 'cgF1_eval_bbox_positive_micro_precision@0.75': 0.7644940491598362}\n", + "Processing subset: inaturalist\n", + "loading annotations into memory...\n", + "Done (t=3.87s)\n", + "creating index...\n", + "index created!\n", + "Loaded 53887 predictions\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + 
"100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1439027/1439027 [01:22<00:00, 17398.82it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Accumulating results\n", + "cgF1 metric, IoU type=segm\n", + " Average cgF1 @[ IoU=0.50:0.95] = 0.658\n", + " Average precision @[ IoU=0.50:0.95] = 0.776\n", + " Average recall @[ IoU=0.50:0.95] = 0.722\n", + " Average F1 @[ IoU=0.50:0.95] = 0.748\n", + " Average positive_macro_F1 @[ IoU=0.50:0.95] = 0.935\n", + " Average positive_micro_F1 @[ IoU=0.50:0.95] = 0.807\n", + " Average positive_micro_precision @[ IoU=0.50:0.95] = 0.915\n", + " Average IL_precision = 0.848\n", + " Average IL_recall = 0.796\n", + " Average IL_F1 = 0.821\n", + " Average IL_FPR = 0.005\n", + " Average IL_MCC = 0.816\n", + " Average cgF1 @[ IoU=0.50 ] = 0.692\n", + " Average precision @[ IoU=0.50 ] = 0.816\n", + " Average recall @[ IoU=0.50 ] = 0.759\n", + " Average F1 @[ IoU=0.50 ] = 0.786\n", + " Average positive_macro_F1 @[ IoU=0.50 ] = 0.981\n", + " Average positive_micro_F1 @[ IoU=0.50 ] = 0.848\n", + " Average positive_micro_precision @[ IoU=0.50 ] = 0.962\n", + " Average cgF1 @[ IoU=0.75 ] = 0.680\n", + " Average precision @[ IoU=0.75 ] = 0.801\n", + " Average recall @[ IoU=0.75 ] = 0.745\n", + " Average F1 @[ IoU=0.75 ] = 0.772\n", + " Average positive_macro_F1 @[ IoU=0.75 ] = 0.965\n", + " Average positive_micro_F1 @[ IoU=0.75 ] = 0.833\n", + " Average positive_micro_precision @[ IoU=0.75 ] = 0.945\n", + "\n", + "{'cgF1_eval_segm_cgF1': 0.6580192923425129, 'cgF1_eval_segm_precision': 0.7756659354606497, 'cgF1_eval_segm_recall': 0.721546369069409, 'cgF1_eval_segm_F1': 0.7475780939907699, 'cgF1_eval_segm_positive_macro_F1': 
0.9345464754990648, 'cgF1_eval_segm_positive_micro_F1': 0.8066917804453706, 'cgF1_eval_segm_positive_micro_precision': 0.914747514495301, 'cgF1_eval_segm_IL_precision': 0.8477058323766221, 'cgF1_eval_segm_IL_recall': 0.7959997413454318, 'cgF1_eval_segm_IL_F1': 0.8210390273987034, 'cgF1_eval_segm_IL_FPR': 0.004764366701848471, 'cgF1_eval_segm_IL_MCC': 0.815701000423264, 'cgF1_eval_segm_cgF1@0.5': 0.6920242595075669, 'cgF1_eval_segm_precision@0.5': 0.81574816894191, 'cgF1_eval_segm_recall@0.5': 0.7588319951494304, 'cgF1_eval_segm_F1@0.5': 0.7862114792496716, 'cgF1_eval_segm_positive_macro_F1@0.5': 0.9812705268789929, 'cgF1_eval_segm_positive_micro_F1@0.5': 0.8483798096955603, 'cgF1_eval_segm_positive_micro_precision@0.5': 0.9620167341118989, 'cgF1_eval_segm_cgF1@0.75': 0.6797028038245808, 'cgF1_eval_segm_precision@0.75': 0.8012246594574692, 'cgF1_eval_segm_recall@0.75': 0.7453218162752996, 'cgF1_eval_segm_F1@0.75': 0.7722129495022094, 'cgF1_eval_segm_positive_macro_F1@0.75': 0.9650080958461289, 'cgF1_eval_segm_positive_micro_F1@0.75': 0.8332744516334855, 'cgF1_eval_segm_positive_micro_precision@0.75': 0.9448890718087305}\n", + "loading annotations into memory...\n", + "Done (t=3.67s)\n", + "creating index...\n", + "index created!\n", + "Loaded 53887 predictions\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 1439027/1439027 [01:18<00:00, 18312.98it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Accumulating results\n", + "cgF1 metric, IoU type=bbox\n", + " Average cgF1 @[ IoU=0.50:0.95] = 0.652\n", + " Average precision @[ IoU=0.50:0.95] = 0.769\n", + " Average recall @[ 
IoU=0.50:0.95] = 0.715\n", + " Average F1 @[ IoU=0.50:0.95] = 0.741\n", + " Average positive_macro_F1 @[ IoU=0.50:0.95] = 0.926\n", + " Average positive_micro_F1 @[ IoU=0.50:0.95] = 0.800\n", + " Average positive_micro_precision @[ IoU=0.50:0.95] = 0.907\n", + " Average IL_precision = 0.848\n", + " Average IL_recall = 0.796\n", + " Average IL_F1 = 0.821\n", + " Average IL_FPR = 0.005\n", + " Average IL_MCC = 0.816\n", + " Average cgF1 @[ IoU=0.50 ] = 0.691\n", + " Average precision @[ IoU=0.50 ] = 0.815\n", + " Average recall @[ IoU=0.50 ] = 0.758\n", + " Average F1 @[ IoU=0.50 ] = 0.786\n", + " Average positive_macro_F1 @[ IoU=0.50 ] = 0.981\n", + " Average positive_micro_F1 @[ IoU=0.50 ] = 0.848\n", + " Average positive_micro_precision @[ IoU=0.50 ] = 0.961\n", + " Average cgF1 @[ IoU=0.75 ] = 0.665\n", + " Average precision @[ IoU=0.75 ] = 0.784\n", + " Average recall @[ IoU=0.75 ] = 0.729\n", + " Average F1 @[ IoU=0.75 ] = 0.756\n", + " Average positive_macro_F1 @[ IoU=0.75 ] = 0.944\n", + " Average positive_micro_F1 @[ IoU=0.75 ] = 0.816\n", + " Average positive_micro_precision @[ IoU=0.75 ] = 0.925\n", + "\n", + "{'cgF1_eval_bbox_cgF1': 0.6524418175713689, 'cgF1_eval_bbox_precision': 0.7690916710456228, 'cgF1_eval_bbox_recall': 0.7154308025592622, 'cgF1_eval_bbox_F1': 0.741241468795898, 'cgF1_eval_bbox_positive_macro_F1': 0.9264232147142696, 'cgF1_eval_bbox_positive_micro_F1': 0.7998541343369929, 'cgF1_eval_bbox_positive_micro_precision': 0.906994444831736, 'cgF1_eval_bbox_IL_precision': 0.8477058323766221, 'cgF1_eval_bbox_IL_recall': 0.7959997413454318, 'cgF1_eval_bbox_IL_F1': 0.8210390273987034, 'cgF1_eval_bbox_IL_FPR': 0.004764366701848471, 'cgF1_eval_bbox_IL_MCC': 0.815701000423264, 'cgF1_eval_bbox_cgF1@0.5': 0.6914428818421255, 'cgF1_eval_bbox_precision@0.5': 0.8150628891945468, 'cgF1_eval_bbox_recall@0.5': 0.7581945285663284, 'cgF1_eval_bbox_F1@0.5': 0.7855509702356662, 'cgF1_eval_bbox_positive_macro_F1@0.5': 0.9805502412911423, 
'cgF1_eval_bbox_positive_micro_F1@0.5': 0.8476670759056795, 'cgF1_eval_bbox_positive_micro_precision@0.5': 0.9612085795740021, 'cgF1_eval_bbox_cgF1@0.75': 0.6652433786643561, 'cgF1_eval_bbox_precision@0.75': 0.7841810889665958, 'cgF1_eval_bbox_recall@0.75': 0.7294674054504063, 'cgF1_eval_bbox_F1@0.75': 0.7557854511254823, 'cgF1_eval_bbox_positive_macro_F1@0.75': 0.944243447752961, 'cgF1_eval_bbox_positive_micro_F1@0.75': 0.8155480725402617, 'cgF1_eval_bbox_positive_micro_precision@0.75': 0.9247894863661997}\n", + "Processing subset: nga_art\n", + "loading annotations into memory...\n", + "Done (t=0.45s)\n", + "creating index...\n", + "index created!\n", + "Loaded 34558 predictions\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 22221/22221 [00:04<00:00, 5095.16it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Accumulating results\n", + "cgF1 metric, IoU type=segm\n", + " Average cgF1 @[ IoU=0.50:0.95] = 0.381\n", + " Average precision @[ IoU=0.50:0.95] = 0.523\n", + " Average recall @[ IoU=0.50:0.95] = 0.512\n", + " Average F1 @[ IoU=0.50:0.95] = 0.517\n", + " Average positive_macro_F1 @[ IoU=0.50:0.95] = 0.754\n", + " Average positive_micro_F1 @[ IoU=0.50:0.95] = 0.576\n", + " Average positive_micro_precision @[ IoU=0.50:0.95] = 0.659\n", + " Average IL_precision = 0.700\n", + " Average IL_recall = 0.809\n", + " Average IL_F1 = 0.750\n", + " Average IL_FPR = 0.118\n", + " Average IL_MCC = 0.661\n", + " Average cgF1 @[ IoU=0.50 ] = 0.435\n", + " Average precision @[ IoU=0.50 ] = 0.597\n", + " Average recall @[ IoU=0.50 ] = 0.585\n", + " Average F1 @[ IoU=0.50 ] = 0.591\n", + 
" Average positive_macro_F1 @[ IoU=0.50 ] = 0.838\n", + " Average positive_micro_F1 @[ IoU=0.50 ] = 0.658\n", + " Average positive_micro_precision @[ IoU=0.50 ] = 0.753\n", + " Average cgF1 @[ IoU=0.75 ] = 0.399\n", + " Average precision @[ IoU=0.75 ] = 0.547\n", + " Average recall @[ IoU=0.75 ] = 0.536\n", + " Average F1 @[ IoU=0.75 ] = 0.542\n", + " Average positive_macro_F1 @[ IoU=0.75 ] = 0.781\n", + " Average positive_micro_F1 @[ IoU=0.75 ] = 0.603\n", + " Average positive_micro_precision @[ IoU=0.75 ] = 0.690\n", + "\n", + "{'cgF1_eval_segm_cgF1': 0.38061980191685274, 'cgF1_eval_segm_precision': 0.5226448021036465, 'cgF1_eval_segm_recall': 0.5118585116882325, 'cgF1_eval_segm_F1': 0.5171454354933199, 'cgF1_eval_segm_positive_macro_F1': 0.7539468395539315, 'cgF1_eval_segm_positive_micro_F1': 0.576201025016698, 'cgF1_eval_segm_positive_micro_precision': 0.6591742420468004, 'cgF1_eval_segm_IL_precision': 0.6997999691183566, 'cgF1_eval_segm_IL_recall': 0.808677098006992, 'cgF1_eval_segm_IL_F1': 0.7503088318552136, 'cgF1_eval_segm_IL_FPR': 0.11755136469738198, 'cgF1_eval_segm_IL_MCC': 0.6605677279137476, 'cgF1_eval_segm_cgF1@0.5': 0.434669236088304, 'cgF1_eval_segm_precision@0.5': 0.5968559804098716, 'cgF1_eval_segm_recall@0.5': 0.5845381272236059, 'cgF1_eval_segm_F1@0.5': 0.5905828471879718, 'cgF1_eval_segm_positive_macro_F1@0.5': 0.8377112359789534, 'cgF1_eval_segm_positive_micro_F1@0.5': 0.6580237237158217, 'cgF1_eval_segm_positive_micro_precision@0.5': 0.7527714557079914, 'cgF1_eval_segm_cgF1@0.75': 0.3986390156764444, 'cgF1_eval_segm_precision@0.75': 0.5473856178308225, 'cgF1_eval_segm_recall@0.75': 0.5360887289698226, 'cgF1_eval_segm_F1@0.75': 0.5416282897167458, 'cgF1_eval_segm_positive_macro_F1@0.75': 0.78142131171764, 'cgF1_eval_segm_positive_micro_F1@0.75': 0.6034793993576627, 'cgF1_eval_segm_positive_micro_precision@0.75': 0.6903780508074329}\n", + "loading annotations into memory...\n", + "Done (t=0.32s)\n", + "creating index...\n", + "index 
created!\n", + "Loaded 34558 predictions\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 22221/22221 [00:02<00:00, 7605.50it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Accumulating results\n", + "cgF1 metric, IoU type=bbox\n", + " Average cgF1 @[ IoU=0.50:0.95] = 0.385\n", + " Average precision @[ IoU=0.50:0.95] = 0.528\n", + " Average recall @[ IoU=0.50:0.95] = 0.517\n", + " Average F1 @[ IoU=0.50:0.95] = 0.523\n", + " Average positive_macro_F1 @[ IoU=0.50:0.95] = 0.775\n", + " Average positive_micro_F1 @[ IoU=0.50:0.95] = 0.582\n", + " Average positive_micro_precision @[ IoU=0.50:0.95] = 0.666\n", + " Average IL_precision = 0.700\n", + " Average IL_recall = 0.809\n", + " Average IL_F1 = 0.750\n", + " Average IL_FPR = 0.118\n", + " Average IL_MCC = 0.661\n", + " Average cgF1 @[ IoU=0.50 ] = 0.432\n", + " Average precision @[ IoU=0.50 ] = 0.594\n", + " Average recall @[ IoU=0.50 ] = 0.582\n", + " Average F1 @[ IoU=0.50 ] = 0.588\n", + " Average positive_macro_F1 @[ IoU=0.50 ] = 0.838\n", + " Average positive_micro_F1 @[ IoU=0.50 ] = 0.655\n", + " Average positive_micro_precision @[ IoU=0.50 ] = 0.749\n", + " Average cgF1 @[ IoU=0.75 ] = 0.404\n", + " Average precision @[ IoU=0.75 ] = 0.555\n", + " Average recall @[ IoU=0.75 ] = 0.543\n", + " Average F1 @[ IoU=0.75 ] = 0.549\n", + " Average positive_macro_F1 @[ IoU=0.75 ] = 0.799\n", + " Average positive_micro_F1 @[ IoU=0.75 ] = 0.611\n", + " Average positive_micro_precision @[ IoU=0.75 ] = 0.699\n", + "\n", + "{'cgF1_eval_bbox_cgF1': 0.3846578129287702, 'cgF1_eval_bbox_precision': 0.528189088752597, 
'cgF1_eval_bbox_recall': 0.5172883759116633, 'cgF1_eval_bbox_F1': 0.5226319143318613, 'cgF1_eval_bbox_positive_macro_F1': 0.7754605892309756, 'cgF1_eval_bbox_positive_micro_F1': 0.5823139652062992, 'cgF1_eval_bbox_positive_micro_precision': 0.6661668514342891, 'cgF1_eval_bbox_IL_precision': 0.6997999691183566, 'cgF1_eval_bbox_IL_recall': 0.808677098006992, 'cgF1_eval_bbox_IL_F1': 0.7503088318552136, 'cgF1_eval_bbox_IL_FPR': 0.11755136469738198, 'cgF1_eval_bbox_IL_MCC': 0.6605677279137476, 'cgF1_eval_bbox_cgF1@0.5': 0.4324122177242999, 'cgF1_eval_bbox_precision@0.5': 0.5937570397016169, 'cgF1_eval_bbox_recall@0.5': 0.5815031421393713, 'cgF1_eval_bbox_F1@0.5': 0.5875162177317533, 'cgF1_eval_bbox_positive_macro_F1@0.5': 0.8378319170832961, 'cgF1_eval_bbox_positive_micro_F1@0.5': 0.6546069380197775, 'cgF1_eval_bbox_positive_micro_precision@0.5': 0.748862985013765, 'cgF1_eval_bbox_cgF1@0.75': 0.40385067625751253, 'cgF1_eval_bbox_precision@0.75': 0.5545413536480653, 'cgF1_eval_bbox_recall@0.75': 0.5430967854370555, 'cgF1_eval_bbox_F1@0.75': 0.5487094159071108, 'cgF1_eval_bbox_positive_macro_F1@0.75': 0.798977400359963, 'cgF1_eval_bbox_positive_micro_F1@0.75': 0.6113690681392848, 'cgF1_eval_bbox_positive_micro_precision@0.75': 0.6994030649559191}\n", + "Processing subset: sav\n", + "loading annotations into memory...\n", + "Done (t=0.46s)\n", + "creating index...\n", + "index created!\n", + "Loaded 74229 predictions\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 18079/18079 [00:05<00:00, 3149.81it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Accumulating results\n", + "cgF1 
metric, IoU type=segm\n", + " Average cgF1 @[ IoU=0.50:0.95] = 0.444\n", + " Average precision @[ IoU=0.50:0.95] = 0.587\n", + " Average recall @[ IoU=0.50:0.95] = 0.684\n", + " Average F1 @[ IoU=0.50:0.95] = 0.632\n", + " Average positive_macro_F1 @[ IoU=0.50:0.95] = 0.768\n", + " Average positive_micro_F1 @[ IoU=0.50:0.95] = 0.660\n", + " Average positive_micro_precision @[ IoU=0.50:0.95] = 0.639\n", + " Average IL_precision = 0.900\n", + " Average IL_recall = 0.908\n", + " Average IL_F1 = 0.904\n", + " Average IL_FPR = 0.241\n", + " Average IL_MCC = 0.672\n", + " Average cgF1 @[ IoU=0.50 ] = 0.503\n", + " Average precision @[ IoU=0.50 ] = 0.666\n", + " Average recall @[ IoU=0.50 ] = 0.776\n", + " Average F1 @[ IoU=0.50 ] = 0.717\n", + " Average positive_macro_F1 @[ IoU=0.50 ] = 0.846\n", + " Average positive_micro_F1 @[ IoU=0.50 ] = 0.750\n", + " Average positive_micro_precision @[ IoU=0.50 ] = 0.725\n", + " Average cgF1 @[ IoU=0.75 ] = 0.470\n", + " Average precision @[ IoU=0.75 ] = 0.622\n", + " Average recall @[ IoU=0.75 ] = 0.725\n", + " Average F1 @[ IoU=0.75 ] = 0.669\n", + " Average positive_macro_F1 @[ IoU=0.75 ] = 0.806\n", + " Average positive_micro_F1 @[ IoU=0.75 ] = 0.700\n", + " Average positive_micro_precision @[ IoU=0.75 ] = 0.677\n", + "\n", + "{'cgF1_eval_segm_cgF1': 0.44362320439319136, 'cgF1_eval_segm_precision': 0.5869346360044498, 'cgF1_eval_segm_recall': 0.6838706968595512, 'cgF1_eval_segm_F1': 0.6316558570754611, 'cgF1_eval_segm_positive_macro_F1': 0.7677480052550809, 'cgF1_eval_segm_positive_micro_F1': 0.66049478026234, 'cgF1_eval_segm_positive_micro_precision': 0.6387574946422351, 'cgF1_eval_segm_IL_precision': 0.8998911013612405, 'cgF1_eval_segm_IL_recall': 0.9082273511612319, 'cgF1_eval_segm_IL_F1': 0.9040395093174578, 'cgF1_eval_segm_IL_FPR': 0.24096611117001196, 'cgF1_eval_segm_IL_MCC': 0.671652854269325, 'cgF1_eval_segm_cgF1@0.5': 0.5034829560314773, 'cgF1_eval_segm_precision@0.5': 0.6661259526094324, 'cgF1_eval_segm_recall@0.5': 
0.776140972883037, 'cgF1_eval_segm_F1@0.5': 0.7168878262296565, 'cgF1_eval_segm_positive_macro_F1@0.5': 0.8464809541363024, 'cgF1_eval_segm_positive_micro_F1@0.5': 0.7496178313411686, 'cgF1_eval_segm_positive_micro_precision@0.5': 0.7249409363562374, 'cgF1_eval_segm_cgF1@0.75': 0.47003419875527414, 'cgF1_eval_segm_precision@0.75': 0.6218749985165196, 'cgF1_eval_segm_recall@0.75': 0.7245816867958706, 'cgF1_eval_segm_F1@0.75': 0.669261443378069, 'cgF1_eval_segm_positive_macro_F1@0.75': 0.8063026840071241, 'cgF1_eval_segm_positive_micro_F1@0.75': 0.6998171685976278, 'cgF1_eval_segm_positive_micro_precision@0.75': 0.6767828846107561}\n", + "loading annotations into memory...\n", + "Done (t=0.40s)\n", + "creating index...\n", + "index created!\n", + "Loaded 74229 predictions\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|███████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 18079/18079 [00:03<00:00, 4783.75it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Accumulating results\n", + "cgF1 metric, IoU type=bbox\n", + " Average cgF1 @[ IoU=0.50:0.95] = 0.444\n", + " Average precision @[ IoU=0.50:0.95] = 0.588\n", + " Average recall @[ IoU=0.50:0.95] = 0.685\n", + " Average F1 @[ IoU=0.50:0.95] = 0.632\n", + " Average positive_macro_F1 @[ IoU=0.50:0.95] = 0.776\n", + " Average positive_micro_F1 @[ IoU=0.50:0.95] = 0.661\n", + " Average positive_micro_precision @[ IoU=0.50:0.95] = 0.639\n", + " Average IL_precision = 0.900\n", + " Average IL_recall = 0.908\n", + " Average IL_F1 = 0.904\n", + " Average IL_FPR = 0.241\n", + " Average IL_MCC = 0.672\n", + " Average cgF1 @[ IoU=0.50 ] = 0.503\n", + " Average precision @[ IoU=0.50 ] = 
0.665\n", + " Average recall @[ IoU=0.50 ] = 0.775\n", + " Average F1 @[ IoU=0.50 ] = 0.716\n", + " Average positive_macro_F1 @[ IoU=0.50 ] = 0.846\n", + " Average positive_micro_F1 @[ IoU=0.50 ] = 0.748\n", + " Average positive_micro_precision @[ IoU=0.50 ] = 0.724\n", + " Average cgF1 @[ IoU=0.75 ] = 0.470\n", + " Average precision @[ IoU=0.75 ] = 0.622\n", + " Average recall @[ IoU=0.75 ] = 0.724\n", + " Average F1 @[ IoU=0.75 ] = 0.669\n", + " Average positive_macro_F1 @[ IoU=0.75 ] = 0.806\n", + " Average positive_micro_F1 @[ IoU=0.75 ] = 0.700\n", + " Average positive_micro_precision @[ IoU=0.75 ] = 0.677\n", + "\n", + "{'cgF1_eval_bbox_cgF1': 0.44413891029397917, 'cgF1_eval_bbox_precision': 0.5876168879112194, 'cgF1_eval_bbox_recall': 0.6846656270924854, 'cgF1_eval_bbox_F1': 0.6323901506230807, 'cgF1_eval_bbox_positive_macro_F1': 0.7758898702585046, 'cgF1_eval_bbox_positive_micro_F1': 0.6612625964005575, 'cgF1_eval_bbox_positive_micro_precision': 0.6394999853591734, 'cgF1_eval_bbox_IL_precision': 0.8998911013612405, 'cgF1_eval_bbox_IL_recall': 0.9082273511612319, 'cgF1_eval_bbox_IL_F1': 0.9040395093174578, 'cgF1_eval_bbox_IL_FPR': 0.24096611117001196, 'cgF1_eval_bbox_IL_MCC': 0.671652854269325, 'cgF1_eval_bbox_cgF1@0.5': 0.502671530652559, 'cgF1_eval_bbox_precision@0.5': 0.6650524793295505, 'cgF1_eval_bbox_recall@0.5': 0.7748902085305182, 'cgF1_eval_bbox_F1@0.5': 0.7157324692328706, 'cgF1_eval_bbox_positive_macro_F1@0.5': 0.8464436229967108, 'cgF1_eval_bbox_positive_micro_F1@0.5': 0.74840972901009, 'cgF1_eval_bbox_positive_micro_precision@0.5': 0.723772681731684, 'cgF1_eval_bbox_cgF1@0.75': 0.4699260087048218, 'cgF1_eval_bbox_precision@0.75': 0.6217318687458687, 'cgF1_eval_bbox_recall@0.75': 0.7244149182155347, 'cgF1_eval_bbox_F1@0.75': 0.6691073957786058, 'cgF1_eval_bbox_positive_macro_F1@0.75': 0.806477309183606, 'cgF1_eval_bbox_positive_micro_F1@0.75': 0.6996560882869217, 'cgF1_eval_bbox_positive_micro_precision@0.75': 0.6766271173274823}\n", + 
"Processing subset: yt1b\n", + "loading annotations into memory...\n", + "Done (t=0.15s)\n", + "creating index...\n", + "index created!\n", + "Loaded 23120 predictions\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 7778/7778 [00:01<00:00, 4510.67it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Accumulating results\n", + "cgF1 metric, IoU type=segm\n", + " Average cgF1 @[ IoU=0.50:0.95] = 0.421\n", + " Average precision @[ IoU=0.50:0.95] = 0.545\n", + " Average recall @[ IoU=0.50:0.95] = 0.567\n", + " Average F1 @[ IoU=0.50:0.95] = 0.556\n", + " Average positive_macro_F1 @[ IoU=0.50:0.95] = 0.720\n", + " Average positive_micro_F1 @[ IoU=0.50:0.95] = 0.584\n", + " Average positive_micro_precision @[ IoU=0.50:0.95] = 0.601\n", + " Average IL_precision = 0.853\n", + " Average IL_recall = 0.841\n", + " Average IL_F1 = 0.847\n", + " Average IL_FPR = 0.121\n", + " Average IL_MCC = 0.721\n", + " Average cgF1 @[ IoU=0.50 ] = 0.505\n", + " Average precision @[ IoU=0.50 ] = 0.655\n", + " Average recall @[ IoU=0.50 ] = 0.681\n", + " Average F1 @[ IoU=0.50 ] = 0.668\n", + " Average positive_macro_F1 @[ IoU=0.50 ] = 0.820\n", + " Average positive_micro_F1 @[ IoU=0.50 ] = 0.701\n", + " Average positive_micro_precision @[ IoU=0.50 ] = 0.722\n", + " Average cgF1 @[ IoU=0.75 ] = 0.454\n", + " Average precision @[ IoU=0.75 ] = 0.588\n", + " Average recall @[ IoU=0.75 ] = 0.611\n", + " Average F1 @[ IoU=0.75 ] = 0.599\n", + " Average positive_macro_F1 @[ IoU=0.75 ] = 0.759\n", + " Average positive_micro_F1 @[ IoU=0.75 ] = 0.629\n", + " Average positive_micro_precision @[ IoU=0.75 ] = 
0.648\n", + "\n", + "{'cgF1_eval_segm_cgF1': 0.42073151404696596, 'cgF1_eval_segm_precision': 0.5453179724143051, 'cgF1_eval_segm_recall': 0.5669906233339933, 'cgF1_eval_segm_F1': 0.5558931826749454, 'cgF1_eval_segm_positive_macro_F1': 0.719514022577698, 'cgF1_eval_segm_positive_micro_F1': 0.5836034594081336, 'cgF1_eval_segm_positive_micro_precision': 0.6013252291429667, 'cgF1_eval_segm_IL_precision': 0.8525862066515557, 'cgF1_eval_segm_IL_recall': 0.8407480870386093, 'cgF1_eval_segm_IL_F1': 0.8466252666543157, 'cgF1_eval_segm_IL_FPR': 0.12073429039286084, 'cgF1_eval_segm_IL_MCC': 0.7209201852121546, 'cgF1_eval_segm_cgF1@0.5': 0.5052842773564472, 'cgF1_eval_segm_precision@0.5': 0.6548990097237606, 'cgF1_eval_segm_recall@0.5': 0.6809267556323607, 'cgF1_eval_segm_F1@0.5': 0.6676093380670313, 'cgF1_eval_segm_positive_macro_F1@0.5': 0.8203674808429026, 'cgF1_eval_segm_positive_micro_F1@0.5': 0.7008879591958582, 'cgF1_eval_segm_positive_micro_precision@0.5': 0.7221608621188951, 'cgF1_eval_segm_cgF1@0.75': 0.4535093938858592, 'cgF1_eval_segm_precision@0.75': 0.587798359307308, 'cgF1_eval_segm_recall@0.75': 0.6111593143773055, 'cgF1_eval_segm_F1@0.75': 0.5992012730504395, 'cgF1_eval_segm_positive_macro_F1@0.75': 0.7591976776888013, 'cgF1_eval_segm_positive_micro_F1@0.75': 0.629070184451, 'cgF1_eval_segm_positive_micro_precision@0.75': 0.6481685933354631}\n", + "loading annotations into memory...\n", + "Done (t=0.11s)\n", + "creating index...\n", + "index created!\n", + "Loaded 23120 predictions\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 7778/7778 [00:01<00:00, 6379.32it/s]\n" + ] + }, + { + "name": "stdout", 
+ "output_type": "stream", + "text": [ + "Accumulating results\n", + "cgF1 metric, IoU type=bbox\n", + " Average cgF1 @[ IoU=0.50:0.95] = 0.424\n", + " Average precision @[ IoU=0.50:0.95] = 0.549\n", + " Average recall @[ IoU=0.50:0.95] = 0.571\n", + " Average F1 @[ IoU=0.50:0.95] = 0.560\n", + " Average positive_macro_F1 @[ IoU=0.50:0.95] = 0.732\n", + " Average positive_micro_F1 @[ IoU=0.50:0.95] = 0.588\n", + " Average positive_micro_precision @[ IoU=0.50:0.95] = 0.605\n", + " Average IL_precision = 0.853\n", + " Average IL_recall = 0.841\n", + " Average IL_F1 = 0.847\n", + " Average IL_FPR = 0.121\n", + " Average IL_MCC = 0.721\n", + " Average cgF1 @[ IoU=0.50 ] = 0.502\n", + " Average precision @[ IoU=0.50 ] = 0.651\n", + " Average recall @[ IoU=0.50 ] = 0.677\n", + " Average F1 @[ IoU=0.50 ] = 0.664\n", + " Average positive_macro_F1 @[ IoU=0.50 ] = 0.818\n", + " Average positive_micro_F1 @[ IoU=0.50 ] = 0.697\n", + " Average positive_micro_precision @[ IoU=0.50 ] = 0.718\n", + " Average cgF1 @[ IoU=0.75 ] = 0.454\n", + " Average precision @[ IoU=0.75 ] = 0.588\n", + " Average recall @[ IoU=0.75 ] = 0.611\n", + " Average F1 @[ IoU=0.75 ] = 0.599\n", + " Average positive_macro_F1 @[ IoU=0.75 ] = 0.761\n", + " Average positive_micro_F1 @[ IoU=0.75 ] = 0.629\n", + " Average positive_micro_precision @[ IoU=0.75 ] = 0.648\n", + "\n", + "{'cgF1_eval_bbox_cgF1': 0.4236486808813284, 'cgF1_eval_bbox_precision': 0.549098643389262, 'cgF1_eval_bbox_recall': 0.5709215500614235, 'cgF1_eval_bbox_F1': 0.5597475175248535, 'cgF1_eval_bbox_positive_macro_F1': 0.7315482621439834, 'cgF1_eval_bbox_positive_micro_F1': 0.5876499085077717, 'cgF1_eval_bbox_positive_micro_precision': 0.605494196525914, 'cgF1_eval_bbox_IL_precision': 0.8525862066515557, 'cgF1_eval_bbox_IL_recall': 0.8407480870386093, 'cgF1_eval_bbox_IL_F1': 0.8466252666543157, 'cgF1_eval_bbox_IL_FPR': 0.12073429039286084, 'cgF1_eval_bbox_IL_MCC': 0.7209201852121546, 'cgF1_eval_bbox_cgF1@0.5': 0.5021932395356122, 
'cgF1_eval_bbox_precision@0.5': 0.6508930007436738, 'cgF1_eval_bbox_recall@0.5': 0.6767615352589246, 'cgF1_eval_bbox_F1@0.5': 0.6635252744814799, 'cgF1_eval_bbox_positive_macro_F1@0.5': 0.8180919678307201, 'cgF1_eval_bbox_positive_micro_F1@0.5': 0.69660033085053, 'cgF1_eval_bbox_positive_micro_precision@0.5': 0.7177434132363022, 'cgF1_eval_bbox_cgF1@0.75': 0.4535737905070497, 'cgF1_eval_bbox_precision@0.75': 0.5878818178277265, 'cgF1_eval_bbox_recall@0.75': 0.6112460898017521, 'cgF1_eval_bbox_F1@0.75': 0.5992863577083599, 'cgF1_eval_bbox_positive_macro_F1@0.75': 0.7609919097954506, 'cgF1_eval_bbox_positive_micro_F1@0.75': 0.629159510041421, 'cgF1_eval_bbox_positive_micro_precision@0.75': 0.648260623520517}\n", + "Processing subset: fathomnet\n", + "loading annotations into memory...\n", + "Done (t=0.82s)\n", + "creating index...\n", + "index created!\n", + "Loaded 25749 predictions\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 281205/281205 [00:14<00:00, 19028.75it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Accumulating results\n", + "cgF1 metric, IoU type=segm\n", + " Average cgF1 @[ IoU=0.50:0.95] = 0.515\n", + " Average precision @[ IoU=0.50:0.95] = 0.472\n", + " Average recall @[ IoU=0.50:0.95] = 0.711\n", + " Average F1 @[ IoU=0.50:0.95] = 0.567\n", + " Average positive_macro_F1 @[ IoU=0.50:0.95] = 0.690\n", + " Average positive_micro_F1 @[ IoU=0.50:0.95] = 0.600\n", + " Average positive_micro_precision @[ IoU=0.50:0.95] = 0.519\n", + " Average IL_precision = 0.839\n", + " Average IL_recall = 0.885\n", + " Average IL_F1 = 0.861\n", + " Average IL_FPR = 0.003\n", + " 
Average IL_MCC = 0.859\n", + " Average cgF1 @[ IoU=0.50 ] = 0.615\n", + " Average precision @[ IoU=0.50 ] = 0.564\n", + " Average recall @[ IoU=0.50 ] = 0.848\n", + " Average F1 @[ IoU=0.50 ] = 0.677\n", + " Average positive_macro_F1 @[ IoU=0.50 ] = 0.828\n", + " Average positive_micro_F1 @[ IoU=0.50 ] = 0.716\n", + " Average positive_micro_precision @[ IoU=0.50 ] = 0.619\n", + " Average cgF1 @[ IoU=0.75 ] = 0.560\n", + " Average precision @[ IoU=0.75 ] = 0.513\n", + " Average recall @[ IoU=0.75 ] = 0.772\n", + " Average F1 @[ IoU=0.75 ] = 0.617\n", + " Average positive_macro_F1 @[ IoU=0.75 ] = 0.750\n", + " Average positive_micro_F1 @[ IoU=0.75 ] = 0.652\n", + " Average positive_micro_precision @[ IoU=0.75 ] = 0.564\n", + "\n", + "{'cgF1_eval_segm_cgF1': 0.5152972059816302, 'cgF1_eval_segm_precision': 0.47227706231315125, 'cgF1_eval_segm_recall': 0.7107059458476863, 'cgF1_eval_segm_F1': 0.5674160042153165, 'cgF1_eval_segm_positive_macro_F1': 0.6904954435936812, 'cgF1_eval_segm_positive_micro_F1': 0.5997854205638585, 'cgF1_eval_segm_positive_micro_precision': 0.5188866930899365, 'cgF1_eval_segm_IL_precision': 0.8390254059060754, 'cgF1_eval_segm_IL_recall': 0.8845225025500498, 'cgF1_eval_segm_IL_F1': 0.8611729531944466, 'cgF1_eval_segm_IL_FPR': 0.0027941442255456565, 'cgF1_eval_segm_IL_MCC': 0.8591359314756251, 'cgF1_eval_segm_cgF1@0.5': 0.6149429060698316, 'cgF1_eval_segm_precision@0.5': 0.5635963077131737, 'cgF1_eval_segm_recall@0.5': 0.8481276752838837, 'cgF1_eval_segm_F1@0.5': 0.6771405387867684, 'cgF1_eval_segm_positive_macro_F1@0.5': 0.8277623237104728, 'cgF1_eval_segm_positive_micro_F1@0.5': 0.7157690460153667, 'cgF1_eval_segm_positive_micro_precision@0.5': 0.6192183522838083, 'cgF1_eval_segm_cgF1@0.75': 0.5599254460996396, 'cgF1_eval_segm_precision@0.75': 0.5131761400573049, 'cgF1_eval_segm_recall@0.75': 0.7722529064180121, 'cgF1_eval_segm_F1@0.75': 0.6165582438611041, 'cgF1_eval_segm_positive_macro_F1@0.75': 0.7496616653080356, 
'cgF1_eval_segm_positive_micro_F1@0.75': 0.6517309142663014, 'cgF1_eval_segm_positive_micro_precision@0.75': 0.5638221534257606}\n", + "loading annotations into memory...\n", + "Done (t=0.79s)\n", + "creating index...\n", + "index created!\n", + "Loaded 25749 predictions\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 281205/281205 [00:14<00:00, 19965.61it/s]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Accumulating results\n", + "cgF1 metric, IoU type=bbox\n", + " Average cgF1 @[ IoU=0.50:0.95] = 0.540\n", + " Average precision @[ IoU=0.50:0.95] = 0.495\n", + " Average recall @[ IoU=0.50:0.95] = 0.745\n", + " Average F1 @[ IoU=0.50:0.95] = 0.595\n", + " Average positive_macro_F1 @[ IoU=0.50:0.95] = 0.747\n", + " Average positive_micro_F1 @[ IoU=0.50:0.95] = 0.629\n", + " Average positive_micro_precision @[ IoU=0.50:0.95] = 0.544\n", + " Average IL_precision = 0.839\n", + " Average IL_recall = 0.885\n", + " Average IL_F1 = 0.861\n", + " Average IL_FPR = 0.003\n", + " Average IL_MCC = 0.859\n", + " Average cgF1 @[ IoU=0.50 ] = 0.620\n", + " Average precision @[ IoU=0.50 ] = 0.568\n", + " Average recall @[ IoU=0.50 ] = 0.855\n", + " Average F1 @[ IoU=0.50 ] = 0.683\n", + " Average positive_macro_F1 @[ IoU=0.50 ] = 0.842\n", + " Average positive_micro_F1 @[ IoU=0.50 ] = 0.722\n", + " Average positive_micro_precision @[ IoU=0.50 ] = 0.625\n", + " Average cgF1 @[ IoU=0.75 ] = 0.577\n", + " Average precision @[ IoU=0.75 ] = 0.528\n", + " Average recall @[ IoU=0.75 ] = 0.795\n", + " Average F1 @[ IoU=0.75 ] = 0.635\n", + " Average positive_macro_F1 @[ IoU=0.75 ] = 0.786\n", + " Average 
positive_micro_F1 @[ IoU=0.75 ] = 0.671\n", + " Average positive_micro_precision @[ IoU=0.75 ] = 0.581\n", + "\n", + "{'cgF1_eval_bbox_cgF1': 0.5404467727790648, 'cgF1_eval_bbox_precision': 0.4953251162982368, 'cgF1_eval_bbox_recall': 0.7453898005477004, 'cgF1_eval_bbox_F1': 0.5951093668775386, 'cgF1_eval_bbox_positive_macro_F1': 0.7472057384312792, 'cgF1_eval_bbox_positive_micro_F1': 0.6290585144667509, 'cgF1_eval_bbox_positive_micro_precision': 0.5442093891698694, 'cgF1_eval_bbox_IL_precision': 0.8390254059060754, 'cgF1_eval_bbox_IL_recall': 0.8845225025500498, 'cgF1_eval_bbox_IL_F1': 0.8611729531944466, 'cgF1_eval_bbox_IL_FPR': 0.0027941442255456565, 'cgF1_eval_bbox_IL_MCC': 0.8591359314756251, 'cgF1_eval_bbox_cgF1@0.5': 0.6201953820242813, 'cgF1_eval_bbox_precision@0.5': 0.5684098835897048, 'cgF1_eval_bbox_recall@0.5': 0.8553713829911432, 'cgF1_eval_bbox_F1@0.5': 0.6829242853928004, 'cgF1_eval_bbox_positive_macro_F1@0.5': 0.8423905574972759, 'cgF1_eval_bbox_positive_micro_F1@0.5': 0.7218827187905563, 'cgF1_eval_bbox_positive_micro_precision@0.5': 0.6245069861553695, 'cgF1_eval_bbox_cgF1@0.75': 0.5765731241108606, 'cgF1_eval_bbox_precision@0.75': 0.5284327280049546, 'cgF1_eval_bbox_recall@0.75': 0.7952117766088179, 'cgF1_eval_bbox_F1@0.75': 0.6348897796975862, 'cgF1_eval_bbox_positive_macro_F1@0.75': 0.786060071093738, 'cgF1_eval_bbox_positive_micro_F1@0.75': 0.6711081483003005, 'cgF1_eval_bbox_positive_micro_precision@0.75': 0.5805844336627427}\n" + ] + } + ], + "source": [ + "results_silver = {}\n", + "results_silver_bbox = {}\n", + "\n", + "for subset_name, gt in saco_silver_gts.items():\n", + " print(\"Processing subset: \", subset_name)\n", + " gt_path = os.path.join(GT_DIR, gt)\n", + " pred_path = os.path.join(PRED_DIR, f\"silver_{subset_name}/dumps/silver_{subset_name}/coco_predictions_segm.json\")\n", + " \n", + " evaluator = CGF1Evaluator(gt_path=gt_path, verbose=True, iou_type=\"segm\") \n", + " summary = evaluator.evaluate(pred_path)\n", + " 
print(summary)\n", + "\n", + " cur_results = {}\n", + " cur_results[\"cgf1\"] = summary[\"cgF1_eval_segm_cgF1\"] * 100\n", + " cur_results[\"il_mcc\"] = summary[\"cgF1_eval_segm_IL_MCC\"]\n", + " cur_results[\"pmf1\"] = summary[\"cgF1_eval_segm_positive_micro_F1\"] * 100\n", + " results_silver[subset_name] = cur_results\n", + "\n", + " # Also eval bbox \n", + " evaluator = CGF1Evaluator(gt_path=gt_path, verbose=True, iou_type=\"bbox\") \n", + " summary = evaluator.evaluate(pred_path)\n", + " print(summary)\n", + "\n", + " cur_results = {}\n", + " cur_results[\"cgf1\"] = summary[\"cgF1_eval_bbox_cgF1\"] * 100\n", + " cur_results[\"il_mcc\"] = summary[\"cgF1_eval_bbox_IL_MCC\"]\n", + " cur_results[\"pmf1\"] = summary[\"cgF1_eval_bbox_positive_micro_F1\"] * 100\n", + " results_silver_bbox[subset_name] = cur_results\n" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "f90e949a-05ad-46e4-9b97-78b36a5f8c65", + "metadata": {}, + "outputs": [], + "source": [ + "# Compute averages\n", + "METRICS = [\"cgf1\", \"il_mcc\", \"pmf1\"]\n", + "avg_stats, avg_stats_bbox = {}, {}\n", + "for key in METRICS:\n", + " avg_stats[key] = sum(res[key] for res in results_silver.values()) / len(results_silver)\n", + " avg_stats_bbox[key] = sum(res[key] for res in results_silver_bbox.values()) / len(results_silver_bbox)\n", + "results_silver[\"Average\"] = avg_stats\n", + "results_silver_bbox[\"Average\"] = avg_stats_bbox" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "791eefb4-5e36-4dc0-9f26-2870355a7997", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
bdd100kdroidego4dfood_recgeodeinaturalistnga_artsavyt1bfathomnetAverage
cgf1il_mccpmf1cgf1il_mccpmf1cgf1il_mccpmf1cgf1il_mccpmf1cgf1il_mccpmf1cgf1il_mccpmf1cgf1il_mccpmf1cgf1il_mccpmf1cgf1il_mccpmf1cgf1il_mccpmf1cgf1il_mccpmf1
46.610.7860.1345.580.7660.3538.640.6262.5652.960.7967.2170.070.8978.7365.80.8280.6738.060.6657.6244.360.6766.0542.070.7258.3651.530.8659.9849.570.7665.17
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Pretty print segmentation results\n", + "from IPython.display import HTML, display\n", + "\n", + "row1, row2, row3 = \"\", \"\", \"\"\n", + "for subset in results_silver:\n", + " row1 += f'{subset}'\n", + " row2 += \"\" + \"\".join(METRICS) + \"\"\n", + " row3 += \"\" + \"\".join([str(round(results_silver[subset][k], 2)) for k in METRICS]) + \"\"\n", + "\n", + "display(HTML(\n", + " f\"{row1}{row2}{row3}
\"\n", + "))" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "id": "9250c2da-fe9a-4c13-be32-edb54748440e", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "
bdd100kdroidego4dfood_recgeodeinaturalistnga_artsavyt1bfathomnetAverage
cgf1il_mccpmf1cgf1il_mccpmf1cgf1il_mccpmf1cgf1il_mccpmf1cgf1il_mccpmf1cgf1il_mccpmf1cgf1il_mccpmf1cgf1il_mccpmf1cgf1il_mccpmf1cgf1il_mccpmf1cgf1il_mccpmf1
46.210.7859.6246.120.7661.0738.790.6262.853.350.7967.7170.760.8979.5165.240.8279.9938.470.6658.2344.410.6766.1342.360.7258.7654.040.8662.9149.980.7665.67
" + ], + "text/plain": [ + "" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Pretty print bbox detection results\n", + "from IPython.display import HTML, display\n", + "\n", + "row1, row2, row3 = \"\", \"\", \"\"\n", + "for subset in results_silver_bbox:\n", + " row1 += f'{subset}'\n", + " row2 += \"\" + \"\".join(METRICS) + \"\"\n", + " row3 += \"\" + \"\".join([str(round(results_silver_bbox[subset][k], 2)) for k in METRICS]) + \"\"\n", + "\n", + "display(HTML(\n", + " f\"{row1}{row2}{row3}
\"\n", + "))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9059d9e2-ce61-42f9-9b12-3c61b3bf75bb", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/source_code/sam3/examples/saco_veval_vis_example.ipynb b/source_code/sam3/examples/saco_veval_vis_example.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..4543f3517370270a4c9e8ed38cf925f679bb2fe2 --- /dev/null +++ b/source_code/sam3/examples/saco_veval_vis_example.ipynb @@ -0,0 +1,269 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "37048f21", + "metadata": {}, + "outputs": [], + "source": [ + "# Copyright (c) Meta Platforms, Inc. and affiliates." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "154d8663", + "metadata": {}, + "outputs": [], + "source": [ + "using_colab = False" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b85d99d9", + "metadata": {}, + "outputs": [], + "source": [ + "if using_colab:\n", + " import torch\n", + " import torchvision\n", + " print(\"PyTorch version:\", torch.__version__)\n", + " print(\"Torchvision version:\", torchvision.__version__)\n", + " print(\"CUDA is available:\", torch.cuda.is_available())\n", + " import sys\n", + " !{sys.executable} -m pip install opencv-python matplotlib scikit-learn\n", + " !{sys.executable} -m pip install 'git+https://github.com/facebookresearch/sam3.git'" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "da21a3bc", + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "from glob import glob\n", + "\n", + "import numpy as np\n", + "import utils\n", + "\n", + "from matplotlib import pyplot as plt\n", + "\n", + "COLORS = utils.pascal_color_map()[1:]" + ] + }, + { + "cell_type": "markdown", + "id": "57e85e7e", + "metadata": {}, + "source": [ + "1. 
Load the data" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a796734e", + "metadata": {}, + "outputs": [], + "source": [ + "# Prepare the data path\n", + "DATA_DIR = \"./sam3_saco_veval_data\" # PUT YOUR DATA PATH HERE\n", + "ANNOT_DIR = os.path.join(DATA_DIR, \"annotation\")\n", + "\n", + "# Load the SACO/Veval annotation files\n", + "annot_file_list = glob(os.path.join(ANNOT_DIR, \"*veval*.json\"))\n", + "annot_dfs = utils.get_annot_dfs(file_list=annot_file_list)" + ] + }, + { + "cell_type": "markdown", + "id": "74bf92b1", + "metadata": {}, + "source": [ + "Show the annotation files being loaded" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a95620ec", + "metadata": {}, + "outputs": [], + "source": [ + "annot_dfs.keys()" + ] + }, + { + "cell_type": "markdown", + "id": "5ce211d3", + "metadata": {}, + "source": [ + "2. Examples of the data format" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6ba749db", + "metadata": {}, + "outputs": [], + "source": [ + "annot_dfs[\"saco_veval_yt1b_val\"].keys()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4b6dc186", + "metadata": {}, + "outputs": [], + "source": [ + "annot_dfs[\"saco_veval_yt1b_val\"][\"info\"]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c41091b3", + "metadata": {}, + "outputs": [], + "source": [ + "annot_dfs[\"saco_veval_yt1b_val\"][\"videos\"].head(3)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a7df5771", + "metadata": {}, + "outputs": [], + "source": [ + "annot_dfs[\"saco_veval_yt1b_val\"][\"annotations\"].head(3)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "24d2861c", + "metadata": {}, + "outputs": [], + "source": [ + "annot_dfs[\"saco_veval_yt1b_val\"][\"categories\"].head(3)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f9f98f27", + "metadata": {}, + "outputs": [], + "source": [ + 
"annot_dfs[\"saco_veval_yt1b_val\"][\"video_np_pairs\"].head(3)" + ] + }, + { + "cell_type": "markdown", + "id": "5673a63f", + "metadata": {}, + "source": [ + "3. Visualize the data" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "da827d09", + "metadata": {}, + "outputs": [], + "source": [ + "# Select a target dataset\n", + "target_dataset_name = \"saco_veval_yt1b_val\"\n", + "\n", + "# visualize a random positive video-np pair\n", + "df_pairs = annot_dfs[target_dataset_name][\"video_np_pairs\"]\n", + "df_positive_pairs = df_pairs[df_pairs.num_masklets > 0]\n", + "rand_idx = np.random.randint(len(df_positive_pairs))\n", + "pair_row = df_positive_pairs.iloc[rand_idx]\n", + "video_id = pair_row.video_id\n", + "noun_phrase = pair_row.noun_phrase\n", + "print(f\"Randomly selected video-np pair: video_id={video_id}, noun_phrase={noun_phrase}\")\n", + "\n", + "def display_image_in_subplot(img, axes, row, col, title=\"\"):\n", + " axes[row, col].imshow(img)\n", + " axes[row, col].set_title(title)\n", + " axes[row, col].axis('off')\n", + "\n", + "num_frames_to_show = 5 # Number of frames to show per dataset\n", + "every_n_frames = 4 # Interval between frames to show\n", + "\n", + "fig, axes = plt.subplots(num_frames_to_show, 3, figsize=(15, 5 * num_frames_to_show))\n", + "\n", + "for idx in range(0, num_frames_to_show):\n", + " sampled_frame_idx = idx * every_n_frames\n", + " print(f\"Reading annotations for frame {sampled_frame_idx}\")\n", + " # Get the frame and the corresponding masks and noun phrases\n", + " frame, annot_masks, annot_noun_phrases = utils.get_all_annotations_for_frame(\n", + " annot_dfs[target_dataset_name], video_id=video_id, frame_idx=sampled_frame_idx, data_dir=DATA_DIR, dataset=target_dataset_name\n", + " )\n", + " # Filter masks and noun phrases by the selected noun phrase\n", + " annot_masks = [m for m, np in zip(annot_masks, annot_noun_phrases) if np == noun_phrase]\n", + "\n", + " # Show the frame\n", + " 
display_image_in_subplot(frame, axes, idx, 0, f\"{target_dataset_name} - {noun_phrase} - Frame {sampled_frame_idx}\")\n", + "\n", + " # Show the annotated masks\n", + " if annot_masks is None:\n", + " print(f\"No masks found for video_id {video_id} at frame {sampled_frame_idx}\")\n", + " else:\n", + " # Show all masks over a white background\n", + " all_masks = utils.draw_masks_to_frame(\n", + " frame=np.ones_like(frame)*255, masks=annot_masks, colors=COLORS[: len(annot_masks)]\n", + " )\n", + " display_image_in_subplot(all_masks, axes, idx, 1, f\"{target_dataset_name} - {noun_phrase} - Frame {sampled_frame_idx} - Masks\")\n", + " \n", + " # Show masks overlaid on the frame\n", + " masked_frame = utils.draw_masks_to_frame(\n", + " frame=frame, masks=annot_masks, colors=COLORS[: len(annot_masks)]\n", + " )\n", + " display_image_in_subplot(masked_frame, axes, idx, 2, f\"Dataset: {target_dataset_name} - {noun_phrase} - Frame {sampled_frame_idx} - Masks overlaid\")\n", + "\n", + "plt.tight_layout()\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a2a23152", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.13" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/source_code/sam3/examples/sam3_for_sam1_task_example.ipynb b/source_code/sam3/examples/sam3_for_sam1_task_example.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..18cafbdecca891834306bd29a011f4093205a24a --- /dev/null +++ b/source_code/sam3/examples/sam3_for_sam1_task_example.ipynb @@ -0,0 +1,846 @@ +{ + "cells": [ + { + "cell_type": "code", + 
"execution_count": null, + "id": "f400486b", + "metadata": {}, + "outputs": [], + "source": [ + "# Copyright (c) Meta Platforms, Inc. and affiliates." + ] + }, + { + "cell_type": "markdown", + "id": "a1ae39ff", + "metadata": { + "jp-MarkdownHeadingCollapsed": true + }, + "source": [ + "# Interactive Instance Segmentation using SAM 3" + ] + }, + { + "cell_type": "markdown", + "id": "b4a4b25c", + "metadata": {}, + "source": [ + "Segment Anything Model 3 (SAM 3) predicts instance masks that indicate the desired object given geometric prompts (SAM 1 task).\n", + "The `SAM3Image` and `Sam3Processor` classes provide an easy interface to prompt the model. The user first sets an image using the `Sam3Processor.set_image` method, which computes the necessary image embeddings. Then, prompts can be provided via the `predict` method to efficiently predict masks from those prompts. The model can take as input both point and box prompts, as well as masks from the previous iteration of prediction.\n", + "\n", + "This notebook follows the SAM 2 API for interactive image segmentation.\n", + "\n", + "# \n", + "# \"Open\n", + "# \n" + ] + }, + { + "cell_type": "markdown", + "id": "644532a8", + "metadata": {}, + "source": [ + "## Environment Set-up" + ] + }, + { + "cell_type": "markdown", + "id": "07fabfee", + "metadata": {}, + "source": [ + "First install `sam3` in your environment using the [installation instructions](https://github.com/facebookresearch/sam3?tab=readme-ov-file#installation) in the repository." + ] + }, + { + "cell_type": "markdown", + "id": "0be845da", + "metadata": {}, + "source": [ + "## Set-up" + ] + }, + { + "cell_type": "markdown", + "id": "33681dd1", + "metadata": {}, + "source": [ + "Necessary imports and helper functions for displaying points, boxes, and masks." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "fe773ede", + "metadata": {}, + "outputs": [], + "source": [ + "using_colab = False" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "79250a4e", + "metadata": {}, + "outputs": [], + "source": [ + "if using_colab:\n", + " import torch\n", + " import torchvision\n", + " print(\"PyTorch version:\", torch.__version__)\n", + " print(\"Torchvision version:\", torchvision.__version__)\n", + " print(\"CUDA is available:\", torch.cuda.is_available())\n", + " import sys\n", + " !{sys.executable} -m pip install opencv-python matplotlib scikit-learn\n", + " !{sys.executable} -m pip install 'git+https://github.com/facebookresearch/sam3.git'" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "69b28288", + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "# if using Apple MPS, fall back to CPU for unsupported ops\n", + "os.environ[\"PYTORCH_ENABLE_MPS_FALLBACK\"] = \"1\"\n", + "import numpy as np\n", + "import torch\n", + "import matplotlib.pyplot as plt\n", + "from PIL import Image\n", + "import sam3\n", + "sam3_root = os.path.join(os.path.dirname(sam3.__file__), \"..\")\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "33a15e2f-c7e1-4e5d-862f-fcb751a60b89", + "metadata": {}, + "outputs": [], + "source": [ + "# select the device for computation\n", + "if torch.cuda.is_available():\n", + " device = torch.device(\"cuda\")\n", + "elif torch.backends.mps.is_available():\n", + " device = torch.device(\"mps\")\n", + "else:\n", + " device = torch.device(\"cpu\")\n", + "print(f\"using device: {device}\")\n", + "\n", + "if device.type == \"cuda\":\n", + " # use bfloat16 for the entire notebook\n", + " torch.autocast(\"cuda\", dtype=torch.bfloat16).__enter__()\n", + " # turn on tfloat32 for Ampere GPUs (https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices)\n", + " if 
torch.cuda.get_device_properties(0).major >= 8:\n", + " torch.backends.cuda.matmul.allow_tf32 = True\n", + " torch.backends.cudnn.allow_tf32 = True\n", + "elif device.type == \"mps\":\n", + " print(\n", + " \"\\nSupport for MPS devices is preliminary. SAM 3 is trained with CUDA and might \"\n", + " \"give numerically different outputs and sometimes degraded performance on MPS. \"\n", + " \"See e.g. https://github.com/pytorch/pytorch/issues/84936 for a discussion.\"\n", + " )" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "29bc90d5", + "metadata": {}, + "outputs": [], + "source": [ + "np.random.seed(3)\n", + "\n", + "def show_mask(mask, ax, random_color=False, borders = True):\n", + " if random_color:\n", + " color = np.concatenate([np.random.random(3), np.array([0.6])], axis=0)\n", + " else:\n", + " color = np.array([30/255, 144/255, 255/255, 0.6])\n", + " h, w = mask.shape[-2:]\n", + " mask = mask.astype(np.uint8)\n", + " mask_image = mask.reshape(h, w, 1) * color.reshape(1, 1, -1)\n", + " if borders:\n", + " import cv2\n", + " contours, _ = cv2.findContours(mask,cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE) \n", + " # Try to smooth contours\n", + " contours = [cv2.approxPolyDP(contour, epsilon=0.01, closed=True) for contour in contours]\n", + " mask_image = cv2.drawContours(mask_image, contours, -1, (1, 1, 1, 0.5), thickness=2) \n", + " ax.imshow(mask_image)\n", + "\n", + "def show_points(coords, labels, ax, marker_size=375):\n", + " pos_points = coords[labels==1]\n", + " neg_points = coords[labels==0]\n", + " ax.scatter(pos_points[:, 0], pos_points[:, 1], color='green', marker='*', s=marker_size, edgecolor='white', linewidth=1.25)\n", + " ax.scatter(neg_points[:, 0], neg_points[:, 1], color='red', marker='*', s=marker_size, edgecolor='white', linewidth=1.25) \n", + "\n", + "def show_box(box, ax):\n", + " x0, y0 = box[0], box[1]\n", + " w, h = box[2] - box[0], box[3] - box[1]\n", + " ax.add_patch(plt.Rectangle((x0, y0), w, h, 
edgecolor='green', facecolor=(0, 0, 0, 0), lw=2)) \n", + "\n", + "def show_masks(image, masks, scores, point_coords=None, box_coords=None, input_labels=None, borders=True):\n", + " for i, (mask, score) in enumerate(zip(masks, scores)):\n", + " plt.figure(figsize=(10, 10))\n", + " plt.imshow(image)\n", + " show_mask(mask, plt.gca(), borders=borders)\n", + " if point_coords is not None:\n", + " assert input_labels is not None\n", + " show_points(point_coords, input_labels, plt.gca())\n", + " if box_coords is not None:\n", + " # boxes\n", + " show_box(box_coords, plt.gca())\n", + " if len(scores) > 1:\n", + " plt.title(f\"Mask {i+1}, Score: {score:.3f}\", fontsize=18)\n", + " plt.axis('off')\n", + " plt.show()" + ] + }, + { + "cell_type": "markdown", + "id": "23842fb2", + "metadata": {}, + "source": [ + "## Example image" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3c2e4f6b", + "metadata": {}, + "outputs": [], + "source": [ + "image = Image.open(f\"{sam3_root}/assets/images/truck.jpg\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e30125fd", + "metadata": {}, + "outputs": [], + "source": [ + "plt.figure(figsize=(10, 10))\n", + "plt.imshow(image)\n", + "plt.axis('on')\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "id": "98b228b8", + "metadata": {}, + "source": [ + "## Selecting objects with SAM 3" + ] + }, + { + "cell_type": "markdown", + "id": "0bb1927b", + "metadata": {}, + "source": [ + "First, load the SAM 3 model. Running on CUDA and using the default model are recommended for best results." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7e28150b", + "metadata": {}, + "outputs": [], + "source": [ + "from sam3 import build_sam3_image_model\n", + "from sam3.model.sam3_image_processor import Sam3Processor\n", + "\n", + "bpe_path = f\"{sam3_root}/assets/bpe_simple_vocab_16e6.txt.gz\"\n", + "model = build_sam3_image_model(bpe_path=bpe_path, enable_inst_interactivity=True)\n" + ] + }, + { + "cell_type": "markdown", + "id": "c925e829", + "metadata": {}, + "source": [ + "Process the image to produce an image embedding by calling `Sam3Processor.set_image`." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d95d48dd", + "metadata": {}, + "outputs": [], + "source": [ + "processor = Sam3Processor(model)\n", + "inference_state = processor.set_image(image)" + ] + }, + { + "cell_type": "markdown", + "id": "d8fc7a46", + "metadata": {}, + "source": [ + "To select the truck, choose a point on it. Points are input to the model in (x,y) format and come with labels 1 (foreground point) or 0 (background point). Multiple points can be input; here we use only one. The chosen point will be shown as a star on the image." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5c69570c", + "metadata": {}, + "outputs": [], + "source": [ + "input_point = np.array([[520, 375]])\n", + "input_label = np.array([1])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a91ba973", + "metadata": {}, + "outputs": [], + "source": [ + "plt.figure(figsize=(10, 10))\n", + "plt.imshow(image)\n", + "show_points(input_point, input_label, plt.gca())\n", + "plt.axis('on')\n", + "plt.show() " + ] + }, + { + "cell_type": "markdown", + "id": "c765e952", + "metadata": {}, + "source": [ + "Predict with `SAM3Image.predict_inst`. The model returns masks, quality predictions for those masks, and low resolution mask logits that can be passed to the next iteration of prediction." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "5373fd68", + "metadata": {}, + "outputs": [], + "source": [ + "masks, scores, logits = model.predict_inst(\n", + " inference_state,\n", + " point_coords=input_point,\n", + " point_labels=input_label,\n", + " multimask_output=True,\n", + ")\n", + "sorted_ind = np.argsort(scores)[::-1]\n", + "masks = masks[sorted_ind]\n", + "scores = scores[sorted_ind]\n", + "logits = logits[sorted_ind]" + ] + }, + { + "cell_type": "markdown", + "id": "c7f0e938", + "metadata": {}, + "source": [ + "With `multimask_output=True` (the default setting), SAM 3 outputs 3 masks, where `scores` gives the model's own estimation of the quality of these masks. This setting is intended for ambiguous input prompts, and helps the model disambiguate different objects consistent with the prompt. When `False`, it will return a single mask. For ambiguous prompts such as a single point, it is recommended to use `multimask_output=True` even if only a single mask is desired; the best single mask can be chosen by picking the one with the highest score returned in `scores`. This will often result in a better mask." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "47821187", + "metadata": {}, + "outputs": [], + "source": [ + "masks.shape # (number_of_masks) x H x W" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e9c227a6", + "metadata": {}, + "outputs": [], + "source": [ + "show_masks(image, masks, scores, point_coords=input_point, input_labels=input_label, borders=True)" + ] + }, + { + "cell_type": "markdown", + "id": "3fa31f7c", + "metadata": {}, + "source": [ + "## Specifying a specific object with additional points" + ] + }, + { + "cell_type": "markdown", + "id": "88d6d29a", + "metadata": {}, + "source": [ + "The single input point is ambiguous, and the model has returned multiple objects consistent with it. To obtain a single object, multiple points can be provided. 
If available, a mask from a previous iteration can also be supplied to the model to aid in prediction. When specifying a single object with multiple prompts, a single mask can be requested by setting `multimask_output=False`." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f6923b94", + "metadata": {}, + "outputs": [], + "source": [ + "input_point = np.array([[500, 375], [1125, 625]])\n", + "input_label = np.array([1, 1])\n", + "\n", + "mask_input = logits[np.argmax(scores), :, :] # Choose the model's best mask" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d98f96a1", + "metadata": {}, + "outputs": [], + "source": [ + "masks, scores, _ = model.predict_inst(\n", + " inference_state,\n", + " point_coords=input_point,\n", + " point_labels=input_label,\n", + " mask_input=mask_input[None, :, :],\n", + " multimask_output=False,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0ce8b82f", + "metadata": {}, + "outputs": [], + "source": [ + "masks.shape" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e06d5c8d", + "metadata": {}, + "outputs": [], + "source": [ + "show_masks(image, masks, scores, point_coords=input_point, input_labels=input_label)" + ] + }, + { + "cell_type": "markdown", + "id": "c93e2087", + "metadata": {}, + "source": [ + "To exclude the car and specify just the window, a background point (with label 0, here shown in red) can be supplied." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "9a196f68", + "metadata": {}, + "outputs": [], + "source": [ + "input_point = np.array([[500, 375], [1125, 625]])\n", + "input_label = np.array([1, 0])\n", + "\n", + "mask_input = logits[np.argmax(scores), :, :] # Choose the model's best mask" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "81a52282", + "metadata": {}, + "outputs": [], + "source": [ + "masks, scores, _ = model.predict_inst(\n", + " inference_state,\n", + " point_coords=input_point,\n", + " point_labels=input_label,\n", + " mask_input=mask_input[None, :, :],\n", + " multimask_output=False,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "bfca709f", + "metadata": {}, + "outputs": [], + "source": [ + "show_masks(image, masks, scores, point_coords=input_point, input_labels=input_label)" + ] + }, + { + "cell_type": "markdown", + "id": "41e2d5a9", + "metadata": {}, + "source": [ + "## Specifying a specific object with a box" + ] + }, + { + "cell_type": "markdown", + "id": "d61ca7ac", + "metadata": {}, + "source": [ + "The model can also take a box as input, provided in xyxy format." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8ea92a7b", + "metadata": {}, + "outputs": [], + "source": [ + "input_box = np.array([425, 600, 700, 875])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "b35a8814", + "metadata": {}, + "outputs": [], + "source": [ + "masks, scores, _ = model.predict_inst(\n", + "    inference_state,\n", + "    point_coords=None,\n", + "    point_labels=None,\n", + "    box=input_box[None, :],\n", + "    multimask_output=False,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3ffb4906", + "metadata": {}, + "outputs": [], + "source": [ + "show_masks(image, masks, scores, box_coords=input_box)" + ] + }, + { + "cell_type": "markdown", + "id": "c1ed9f0a", + "metadata": {}, + "source": [ + "## Combining points and boxes" + ] + }, + { + "cell_type": "markdown", + "id": "8455d1c5", + "metadata": {}, + "source": [ + "Points and boxes may be combined, just by including both types of prompts to the predictor. Here this can be used to select just the truck's tire, instead of the entire wheel."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "90e2e547", + "metadata": {}, + "outputs": [], + "source": [ + "input_box = np.array([425, 600, 700, 875])\n", + "input_point = np.array([[575, 750]])\n", + "input_label = np.array([0])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6956d8c4", + "metadata": {}, + "outputs": [], + "source": [ + "masks, scores, logits = model.predict_inst(\n", + " inference_state,\n", + " point_coords=input_point,\n", + " point_labels=input_label,\n", + " box=input_box,\n", + " multimask_output=False,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "eb519a31", + "metadata": {}, + "outputs": [], + "source": [ + "show_masks(image, masks, scores, box_coords=input_box, point_coords=input_point, input_labels=input_label)" + ] + }, + { + "cell_type": "markdown", + "id": "45ddbca3", + "metadata": {}, + "source": [ + "## Batched prompt inputs" + ] + }, + { + "cell_type": "markdown", + "id": "df6f18a0", + "metadata": {}, + "source": [ + "`SAM3Image` can take multiple input prompts for the same image, using `predict_inst` method. For example, imagine we have several box outputs from an object detector." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "0a06681b", + "metadata": {}, + "outputs": [], + "source": [ + "input_boxes = np.array([\n", + " [75, 275, 1725, 850],\n", + " [425, 600, 700, 875],\n", + " [1375, 550, 1650, 800],\n", + " [1240, 675, 1400, 750],\n", + "])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "117521a3", + "metadata": {}, + "outputs": [], + "source": [ + "masks, scores, _ = model.predict_inst(\n", + " inference_state,\n", + " point_coords=None,\n", + " point_labels=None,\n", + " box=input_boxes,\n", + " multimask_output=False,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6a8f5d49", + "metadata": {}, + "outputs": [], + "source": [ + "masks.shape # (batch_size) x (num_predicted_masks_per_input) x H x W" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c00c3681", + "metadata": {}, + "outputs": [], + "source": [ + "plt.figure(figsize=(10, 10))\n", + "plt.imshow(image)\n", + "for mask in masks:\n", + " show_mask(mask.squeeze(0), plt.gca(), random_color=True)\n", + "for box in input_boxes:\n", + " show_box(box, plt.gca())\n", + "plt.axis('off')\n", + "plt.show()" + ] + }, + { + "cell_type": "markdown", + "id": "b9a27b5d", + "metadata": {}, + "source": [ + "## End-to-end batched inference\n", + "If all prompts are available in advance, it is possible to run SAM 3 directly in an end-to-end fashion. This also allows batching over images." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d485f75b", + "metadata": {}, + "outputs": [], + "source": [ + "image1 = image # truck.jpg from above\n", + "image1_boxes = np.array([\n", + " [75, 275, 1725, 850],\n", + " [425, 600, 700, 875],\n", + " [1375, 550, 1650, 800],\n", + " [1240, 675, 1400, 750],\n", + "])\n", + "\n", + "image2 = Image.open(f\"{sam3_root}/assets/images/groceries.jpg\")\n", + "image2_boxes = np.array([\n", + " [450, 170, 520, 350],\n", + " [350, 190, 450, 350],\n", + " [500, 170, 580, 350],\n", + " [580, 170, 640, 350],\n", + "])\n", + "\n", + "img_batch = [image1, image2]\n", + "boxes_batch = [image1_boxes, image2_boxes]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "47932c99", + "metadata": {}, + "outputs": [], + "source": [ + "inference_state = processor.set_image_batch(img_batch)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "97af3c54", + "metadata": {}, + "outputs": [], + "source": [ + "masks_batch, scores_batch, _ = model.predict_inst_batch(\n", + " inference_state,\n", + " None,\n", + " None, \n", + " box_batch=boxes_batch, \n", + " multimask_output=False\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "226df881", + "metadata": {}, + "outputs": [], + "source": [ + "for image, boxes, masks in zip(img_batch, boxes_batch, masks_batch):\n", + " plt.figure(figsize=(10, 10))\n", + " plt.imshow(image) \n", + " for mask in masks:\n", + " show_mask(mask.squeeze(0), plt.gca(), random_color=True)\n", + " for box in boxes:\n", + " show_box(box, plt.gca())" + ] + }, + { + "cell_type": "markdown", + "id": "46f30085", + "metadata": {}, + "source": [ + "Similarly, we can have a batch of point prompts defined over a batch of images" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1ab929fc", + "metadata": {}, + "outputs": [], + "source": [ + "image1 = image # truck.jpg from above\n", + "image1_pts = np.array([\n", + " 
[[500, 375]],\n", + " [[650, 750]]\n", + " ]) # Bx1x2 where B corresponds to number of objects \n", + "image1_labels = np.array([[1], [1]])\n", + "\n", + "image2_pts = np.array([\n", + " [[400, 300]],\n", + " [[630, 300]],\n", + "])\n", + "image2_labels = np.array([[1], [1]])\n", + "\n", + "pts_batch = [image1_pts, image2_pts]\n", + "labels_batch = [image1_labels, image2_labels]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "848f8287", + "metadata": {}, + "outputs": [], + "source": [ + "masks_batch, scores_batch, _ = model.predict_inst_batch(inference_state, pts_batch, labels_batch, box_batch=None, multimask_output=True)\n", + "\n", + "# Select the best single mask per object\n", + "best_masks = []\n", + "for masks, scores in zip(masks_batch,scores_batch):\n", + " best_masks.append(masks[range(len(masks)), np.argmax(scores, axis=-1)])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "99b15c6c", + "metadata": {}, + "outputs": [], + "source": [ + "for image, points, labels, masks in zip(img_batch, pts_batch, labels_batch, best_masks):\n", + " plt.figure(figsize=(10, 10))\n", + " plt.imshow(image) \n", + " for mask in masks:\n", + " show_mask(mask, plt.gca(), random_color=True)\n", + " show_points(points, labels, plt.gca())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "4c1594a5-a0de-4477-91d4-db4504a78a83", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "74e3d07e-b0de-48a5-9d29-d639a0dbcdfc", + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d8b1de3a-a253-48ff-8a1c-d80742acbe86", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + 
"file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.11" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/source_code/sam3/examples/sam3_image_interactive.ipynb b/source_code/sam3/examples/sam3_image_interactive.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..7e1e76663683bce14611894a2c54eead92c91889 --- /dev/null +++ b/source_code/sam3/examples/sam3_image_interactive.ipynb @@ -0,0 +1,757 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "id": "5d0e0b69", + "metadata": {}, + "outputs": [], + "source": [ + "# Copyright (c) Meta Platforms, Inc. and affiliates." + ] + }, + { + "cell_type": "markdown", + "id": "11912666", + "metadata": {}, + "source": [ + "# \n", + "# \"Open\n", + "# " + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "8517f5f6", + "metadata": {}, + "outputs": [], + "source": [ + "using_colab = False" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "2540e376", + "metadata": {}, + "outputs": [], + "source": [ + "if using_colab:\n", + " import torch\n", + " import torchvision\n", + " print(\"PyTorch version:\", torch.__version__)\n", + " print(\"Torchvision version:\", torchvision.__version__)\n", + " print(\"CUDA is available:\", torch.cuda.is_available())\n", + " import sys\n", + " !{sys.executable} -m pip install opencv-python matplotlib scikit-learn\n", + " !{sys.executable} -m pip install 'git+https://github.com/facebookresearch/sam3.git'" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "90073483-58f6-404e-90ac-c22efcd76216", + "metadata": {}, + "outputs": [], + "source": [ + "%matplotlib widget" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "13325376-658b-48d6-8528-2a006f223d44", + "metadata": {}, + "outputs": [], + "source": [ + "import torch\n", + "# turn on tfloat32 for Ampere GPUs\n", + "# 
https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices\n", + "torch.backends.cuda.matmul.allow_tf32 = True\n", + "torch.backends.cudnn.allow_tf32 = True\n", + "\n", + "# use bfloat16 for the entire notebook. If your card doesn't support it, try float16 instead\n", + "torch.autocast(\"cuda\", dtype=torch.bfloat16).__enter__()\n", + "\n", + "# inference mode for the whole notebook. Disable if you need gradients\n", + "torch.inference_mode().__enter__()" + ] + }, + { + "cell_type": "markdown", + "id": "fb863772-56a9-4ee2-be52-5d8933066519", + "metadata": {}, + "source": [ + "# Load the model" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "f84b4ccc-9db2-4d88-ac8f-4c272694d25a", + "metadata": {}, + "outputs": [], + "source": [ + "import sam3\n", + "from sam3 import build_sam3_image_model\n", + "import os\n", + "sam3_root = os.path.join(os.path.dirname(sam3.__file__), \"..\")\n", + "bpe_path = f\"{sam3_root}/assets/bpe_simple_vocab_16e6.txt.gz\"" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "de01a36e-1221-4497-a5ab-e6c796689480", + "metadata": {}, + "outputs": [], + "source": [ + "model = build_sam3_image_model(bpe_path=bpe_path)" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "b01ec8a9-d9f6-4baf-96ac-1e5d21fd90b8", + "metadata": {}, + "outputs": [], + "source": [ + "from sam3.model.sam3_image_processor import Sam3Processor\n", + "processor = Sam3Processor(model)" + ] + }, + { + "cell_type": "markdown", + "id": "e6172a69-35ca-487c-bd67-6f1f1ecb20d5", + "metadata": {}, + "source": [ + "# Jupyter widget" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "2a4ac22f-5d5c-4272-a5a1-dfe0c04253a7", + "metadata": {}, + "outputs": [], + "source": [ + "import io\n", + "\n", + "import ipywidgets as widgets\n", + "import matplotlib.pyplot as plt\n", + "import numpy as np\n", + "import PIL.Image\n", + "import requests\n", + "from IPython.display import clear_output, 
display, HTML\n", + "from matplotlib.patches import Rectangle\n", + "\n", + "\n", + "class Sam3SegmentationWidget:\n", + " \"\"\"Interactive Jupyter widget for SAM3 segmentation with text and box prompts.\"\"\"\n", + "\n", + " def __init__(self, processor):\n", + " \"\"\"\n", + " Initialize the segmentation widget.\n", + "\n", + " Args:\n", + " processor: Sam3Processor instance\n", + " \"\"\"\n", + " self.processor = processor\n", + " self.state = None\n", + " self.current_image = None\n", + " self.current_image_array = None\n", + " self.box_mode = \"positive\"\n", + " self.drawing_box = False\n", + " self.box_start = None\n", + " self.current_rect = None\n", + "\n", + " self._setup_ui()\n", + " self._setup_plot()\n", + "\n", + " def _setup_ui(self):\n", + " \"\"\"Set up the UI components.\"\"\"\n", + " self.upload_widget = widgets.FileUpload(\n", + " accept=\"image/*\", multiple=False, description=\"Upload Image\"\n", + " )\n", + " self.upload_widget.observe(self._on_image_upload, names=\"value\")\n", + "\n", + " self.url_input = widgets.Text(\n", + " placeholder=\"Or enter image URL\",\n", + " )\n", + " self.url_button = widgets.Button(description=\"Load URL\", button_style=\"info\")\n", + " self.url_button.on_click(self._on_load_url)\n", + " url_box = widgets.HBox(\n", + " [self.url_input, self.url_button],\n", + " layout=widgets.Layout(width=\"100%\", justify_content=\"space-between\"),\n", + " )\n", + "\n", + " self.text_input = widgets.Text(\n", + " placeholder='Enter segmentation prompt (e.g., \"person\", \"dog\")',\n", + " continuous_update=False,\n", + " )\n", + " self.text_input.observe(self._on_text_submit, names=\"value\")\n", + " self.text_button = widgets.Button(description=\"Segment\", button_style=\"success\")\n", + " self.text_button.on_click(self._on_text_prompt)\n", + " text_box = widgets.HBox(\n", + " [self.text_input, self.text_button],\n", + " layout=widgets.Layout(width=\"100%\", justify_content=\"space-between\"),\n", + " )\n", + "\n", + " 
self.box_mode_buttons = widgets.ToggleButtons(\n", + " options=[\"Positive Boxes\", \"Negative Boxes\"],\n", + " description=\"Box Mode:\",\n", + " button_style=\"\",\n", + " tooltips=[\n", + " \"Draw boxes around objects to include\",\n", + " \"Draw boxes around objects to exclude\",\n", + " ],\n", + " )\n", + " self.box_mode_buttons.observe(self._on_box_mode_change, names=\"value\")\n", + "\n", + " self.clear_button = widgets.Button(\n", + " description=\"Clear All Prompts\", button_style=\"warning\"\n", + " )\n", + " self.clear_button.on_click(self._on_clear_prompts)\n", + "\n", + " self.confidence_slider = widgets.FloatSlider(\n", + " value=0.5,\n", + " min=0.0,\n", + " max=1.0,\n", + " step=0.01,\n", + " description=\"Confidence:\",\n", + " continuous_update=False,\n", + " style={\"description_width\": \"initial\"},\n", + " )\n", + " self.confidence_slider.observe(self._on_confidence_change, names=\"value\")\n", + "\n", + " self.size_slider = widgets.IntSlider(\n", + " value=960,\n", + " min=300,\n", + " max=2000,\n", + " step=10,\n", + " description=\"Image Size:\",\n", + " continuous_update=False,\n", + " style={\"description_width\": \"initial\"},\n", + " )\n", + " self.size_slider.observe(self._on_size_change, names=\"value\")\n", + "\n", + " slider_box = widgets.HBox(\n", + " [self.confidence_slider, self.size_slider],\n", + " layout=widgets.Layout(justify_content=\"space-between\"),\n", + " )\n", + "\n", + " self.output = widgets.Output()\n", + " self.status_label = widgets.Label(value=\"Upload an image to begin\")\n", + "\n", + " # This box will hold our matplotlib output and we can target it with CSS.\n", + " self.plot_container = widgets.Box([self.output])\n", + " self.plot_container.add_class(\"no-drag\")\n", + "\n", + " # CSS to make the cursor a crosshair over the matplotlib canvas\n", + " css_style = widgets.HTML(\n", + " \"\"\"\n", + " \n", + " \"\"\"\n", + " )\n", + " # Create VBoxes for each accordion pane\n", + " source_pane = 
widgets.VBox([self.upload_widget, url_box])\n", + " prompt_pane = widgets.VBox(\n", + " [\n", + " widgets.Label(\"Text Prompt:\"),\n", + " text_box,\n", + " self.box_mode_buttons,\n", + " self.confidence_slider,\n", + " self.clear_button,\n", + " ]\n", + " )\n", + " display_pane = widgets.VBox([self.size_slider])\n", + "\n", + " # Create the Accordion to hold the control panes\n", + " self.accordion = widgets.Accordion(\n", + " children=[source_pane, prompt_pane, display_pane]\n", + " )\n", + " self.accordion.set_title(0, \"Image Source\")\n", + " self.accordion.set_title(1, \"Segmentation Prompts\")\n", + " self.accordion.set_title(2, \"Display Settings\")\n", + " self.accordion.selected_index = 0 # Start with the first pane open\n", + "\n", + " # Create the left sidebar for controls\n", + " sidebar = widgets.VBox(\n", + " [self.status_label, widgets.HTML(\"

Controls

\"), self.accordion]\n", + " )\n", + " sidebar.layout = widgets.Layout(\n", + " width=\"380px\",\n", + " min_width=\"380px\",\n", + " max_width=\"380px\",\n", + " border=\"1px solid #e0e0e0\",\n", + " padding=\"10px\",\n", + " margin=\"0 15px 0 0\",\n", + " flex=\"0 0 auto\",\n", + " )\n", + "\n", + " # Create the main area for the image display\n", + " main_area = widgets.VBox([self.plot_container])\n", + " main_area.layout = widgets.Layout(flex=\"1\", min_width=\"500px\", overflow=\"auto\")\n", + "\n", + " # Combine sidebar and main area into the final app layout\n", + " app_layout = widgets.HBox([sidebar, main_area])\n", + " app_layout.layout = widgets.Layout(\n", + " width=\"100%\",\n", + " display=\"flex\",\n", + " flex_flow=\"row\",\n", + " align_items=\"stretch\",\n", + " )\n", + "\n", + " # Set the main container\n", + " self.container = widgets.VBox(\n", + " [\n", + " css_style,\n", + " widgets.HTML(\"

🖼️ SAM3 Interactive Segmentation

\"),\n", + " app_layout,\n", + " ]\n", + " )\n", + "\n", + " def _setup_plot(self):\n", + " \"\"\"Set up the matplotlib figure.\"\"\"\n", + " # plt.ioff()\n", + " self.fig, self.ax = plt.subplots(figsize=(12, 8))\n", + " # plt.ion()\n", + " self.ax.axis(\"off\")\n", + " self.fig.subplots_adjust(left=0, right=1, top=1, bottom=0)\n", + " self.fig.canvas.toolbar_visible = False\n", + " self.fig.canvas.header_visible = False\n", + " self.fig.canvas.footer_visible = False\n", + " self.fig.canvas.resizable = False\n", + "\n", + " # plt.close(self.fig)\n", + "\n", + " def _set_loading(self, is_loading, message=\"Processing...\"):\n", + " \"\"\"Show/hide loading state and disable/enable controls.\"\"\"\n", + " if is_loading:\n", + " self.status_label.value = f\"⏳ {message}\"\n", + " self.upload_widget.disabled = True\n", + " self.url_button.disabled = True\n", + " self.text_button.disabled = True\n", + " self.clear_button.disabled = True\n", + " self.box_mode_buttons.disabled = True\n", + " self.confidence_slider.disabled = True\n", + " else:\n", + " self.upload_widget.disabled = False\n", + " self.url_button.disabled = False\n", + " self.text_button.disabled = False\n", + " self.clear_button.disabled = False\n", + " self.box_mode_buttons.disabled = False\n", + " self.confidence_slider.disabled = False\n", + "\n", + " def _on_image_upload(self, change):\n", + " \"\"\"Handle image upload.\"\"\"\n", + " if change[\"new\"]:\n", + " uploaded_file = change[\"new\"][0]\n", + " image = PIL.Image.open(io.BytesIO(uploaded_file[\"content\"])).convert(\"RGB\")\n", + " self._set_image(image)\n", + "\n", + " def _on_load_url(self, button):\n", + " \"\"\"Handle loading image from URL.\"\"\"\n", + " url = self.url_input.value.strip()\n", + " if not url:\n", + " self.status_label.value = \"Please enter a URL\"\n", + " return\n", + "\n", + " self._set_loading(True, \"Downloading image from URL...\")\n", + "\n", + " try:\n", + " response = requests.get(url, timeout=10)\n", + " 
response.raise_for_status()\n", + " image = PIL.Image.open(io.BytesIO(response.content)).convert(\"RGB\")\n", + " self._set_image(image)\n", + " except Exception as e:\n", + " self._set_loading(False)\n", + " self.status_label.value = f\"Error loading image: {str(e)}\"\n", + "\n", + " def _set_image(self, image):\n", + " \"\"\"Set the current image, adjust figure size, and initialize state.\"\"\"\n", + " self._set_loading(True, \"Processing image through model...\")\n", + "\n", + " try:\n", + "\n", + " self.current_image = image\n", + " self.current_image_array = np.array(image)\n", + " self.state = self.processor.set_image(image)\n", + " self._set_loading(False)\n", + " self.status_label.value = (\n", + " f\"Image loaded: {image.size[0]}x{image.size[1]} pixels\"\n", + " )\n", + " self._resize_figure()\n", + " self._update_display()\n", + " self._connect_plot_events()\n", + " self.accordion.selected_index = 1\n", + " except Exception as e:\n", + " self._set_loading(False)\n", + " self.status_label.value = f\"Error processing image: {str(e)}\"\n", + "\n", + " def _on_text_submit(self, change):\n", + " \"\"\"Handle text prompt submission via Enter key.\"\"\"\n", + " # Call the same handler as the button click\n", + " self._on_text_prompt(None)\n", + "\n", + " def _on_text_prompt(self, button):\n", + " \"\"\"Handle text prompt submission.\"\"\"\n", + " if self.state is None:\n", + " self.status_label.value = \"Please load an image first\"\n", + " return\n", + "\n", + " prompt = self.text_input.value.strip()\n", + " if not prompt:\n", + " self.status_label.value = \"Please enter a prompt\"\n", + " return\n", + "\n", + " self._set_loading(True, f'Segmenting with prompt: \"{prompt}\"...')\n", + "\n", + " try:\n", + " self.state = self.processor.set_text_prompt(prompt, self.state)\n", + " self._set_loading(False)\n", + " self.status_label.value = f'Segmented with prompt: \"{prompt}\"'\n", + " self._update_display()\n", + " except Exception as e:\n", + " 
self._set_loading(False)\n", + " self.status_label.value = f\"Error: {str(e)}\"\n", + "\n", + " def _on_box_mode_change(self, change):\n", + " \"\"\"Handle box mode toggle.\"\"\"\n", + " self.box_mode = \"positive\" if change[\"new\"] == \"Positive Boxes\" else \"negative\"\n", + "\n", + " def _on_clear_prompts(self, button):\n", + " \"\"\"Clear all prompts and reset to image only.\"\"\"\n", + " if self.current_image is not None:\n", + " try:\n", + " self._set_loading(True, \"Clearing prompts and resetting...\")\n", + " self.state = self.processor.reset_all_prompts(self.state)\n", + " if \"prompted_boxes\" in self.state:\n", + " del self.state[\"prompted_boxes\"]\n", + " self.text_input.value = \"\"\n", + " self._set_loading(False)\n", + " self.status_label.value = \"Cleared all prompts\"\n", + " self._update_display()\n", + " except Exception as e:\n", + " self._set_loading(False)\n", + " import traceback\n", + "\n", + " self.status_label.value = f\"Error: {str(e)} {traceback.format_exc()}\"\n", + "\n", + " def _on_confidence_change(self, change):\n", + " \"\"\"Handle confidence threshold change.\"\"\"\n", + " if self.state is not None:\n", + " self.state = self.processor.set_confidence_threshold(\n", + " change[\"new\"], self.state\n", + " )\n", + " self._update_display()\n", + "\n", + " def _connect_plot_events(self):\n", + " \"\"\"Connect matplotlib event handlers for box drawing.\"\"\"\n", + " # Disable matplotlib's toolbar navigation to allow custom box drawing\n", + " if hasattr(self.fig.canvas, \"toolbar\") and self.fig.canvas.toolbar is not None:\n", + " self.fig.canvas.toolbar.pan()\n", + " self.fig.canvas.toolbar.pan()\n", + "\n", + " self.fig.canvas.mpl_connect(\"button_press_event\", self._on_press)\n", + " self.fig.canvas.mpl_connect(\"button_release_event\", self._on_release)\n", + " self.fig.canvas.mpl_connect(\"motion_notify_event\", self._on_motion)\n", + "\n", + " def _on_press(self, event):\n", + " \"\"\"Handle mouse press for box 
drawing.\"\"\"\n", + " if event.inaxes != self.ax:\n", + " return\n", + " self.drawing_box = True\n", + " self.box_start = (event.xdata, event.ydata)\n", + "\n", + " def _on_motion(self, event):\n", + " \"\"\"Handle mouse motion for box preview.\"\"\"\n", + " if not self.drawing_box or event.inaxes != self.ax or self.box_start is None:\n", + " return\n", + "\n", + " if self.current_rect is not None:\n", + " self.current_rect.remove()\n", + "\n", + " x0, y0 = self.box_start\n", + " x1, y1 = event.xdata, event.ydata\n", + " width = x1 - x0\n", + " height = y1 - y0\n", + "\n", + " color = \"green\" if self.box_mode == \"positive\" else \"red\"\n", + " self.current_rect = Rectangle(\n", + " (x0, y0),\n", + " width,\n", + " height,\n", + " fill=False,\n", + " edgecolor=color,\n", + " linewidth=2,\n", + " linestyle=\"--\",\n", + " )\n", + " self.ax.add_patch(self.current_rect)\n", + " self.fig.canvas.draw_idle()\n", + "\n", + " def _on_release(self, event):\n", + " \"\"\"Handle mouse release to finalize box.\"\"\"\n", + " if not self.drawing_box or event.inaxes != self.ax or self.box_start is None:\n", + " self.drawing_box = False\n", + " return\n", + "\n", + " self.drawing_box = False\n", + "\n", + " if self.current_rect is not None:\n", + " self.current_rect.remove()\n", + " self.current_rect = None\n", + "\n", + " if self.state is None:\n", + " return\n", + "\n", + " x0, y0 = self.box_start\n", + " x1, y1 = event.xdata, event.ydata\n", + "\n", + " x_min = min(x0, x1)\n", + " x_max = max(x0, x1)\n", + " y_min = min(y0, y1)\n", + " y_max = max(y0, y1)\n", + "\n", + " if abs(x_max - x_min) < 5 or abs(y_max - y_min) < 5:\n", + " return\n", + "\n", + " # Get image dimensions\n", + " img_h = self.state[\"original_height\"]\n", + " img_w = self.state[\"original_width\"]\n", + "\n", + " # Convert from xyxy pixel coordinates to cxcywh normalized format\n", + " center_x = (x_min + x_max) / 2.0 / img_w\n", + " center_y = (y_min + y_max) / 2.0 / img_h\n", + " width = (x_max - 
x_min) / img_w\n", + " height = (y_max - y_min) / img_h\n", + "\n", + " box = [center_x, center_y, width, height]\n", + " label = self.box_mode == \"positive\"\n", + " mode_str = \"positive\" if label else \"negative\"\n", + "\n", + " # Store the prompted box in pixel coordinates for display\n", + " if \"prompted_boxes\" not in self.state:\n", + " self.state[\"prompted_boxes\"] = []\n", + " self.state[\"prompted_boxes\"].append(\n", + " {\"box\": [x_min, y_min, x_max, y_max], \"label\": label}\n", + " )\n", + "\n", + " self._set_loading(True, f\"Adding {mode_str} box and re-segmenting...\")\n", + "\n", + " try:\n", + " self.state = self.processor.add_geometric_prompt(box, label, self.state)\n", + " self._set_loading(False)\n", + " self.status_label.value = f\"Added {mode_str} box\"\n", + " self._update_display()\n", + " except Exception as e:\n", + " self._set_loading(False)\n", + " self.status_label.value = f\"Error adding box: {str(e)}\"\n", + "\n", + " def _resize_figure(self):\n", + " \"\"\"Calculate and apply new figure size based on image and slider value.\"\"\"\n", + " if self.current_image is None:\n", + " return\n", + "\n", + " # 1. Get original image dimensions\n", + " img_w, img_h = self.current_image.size\n", + "\n", + " # 2. The slider's value is now the direct target width for the display\n", + " display_w = float(self.size_slider.value)\n", + "\n", + " # 3. Calculate the corresponding height to maintain the original aspect ratio\n", + " aspect_ratio = img_h / img_w\n", + " display_h = int(display_w * aspect_ratio)\n", + "\n", + " # 4. 
Convert pixel dimensions to inches for Matplotlib and apply\n", + " dpi = self.fig.dpi\n", + " new_figsize = (display_w / dpi, display_h / dpi)\n", + " self.fig.set_size_inches(new_figsize, forward=True)\n", + "\n", + " def _on_size_change(self, change):\n", + " \"\"\"Handle a change from the image size slider.\"\"\"\n", + " if self.current_image is not None:\n", + " self._resize_figure()\n", + " # After resizing the canvas, we must redraw the content\n", + " self._update_display()\n", + "\n", + " def _update_display(self):\n", + " \"\"\"Update the display with current results.\"\"\"\n", + " if self.current_image_array is None:\n", + " return\n", + "\n", + " with self.output:\n", + " clear_output(wait=True)\n", + "\n", + " self.ax.clear()\n", + " self.ax.axis(\"off\")\n", + " self.ax.imshow(self.current_image_array)\n", + "\n", + " if self.state is not None and \"masks\" in self.state:\n", + " masks = self.state.get(\"masks\", [])\n", + " boxes = self.state.get(\"boxes\", [])\n", + " scores = self.state.get(\"scores\", [])\n", + "\n", + " if len(masks) > 0:\n", + " mask_overlay = np.zeros((*self.current_image_array.shape[:2], 4))\n", + "\n", + " for i, (mask, box, score) in enumerate(zip(masks, boxes, scores)):\n", + " mask_np = mask[0].cpu().numpy()\n", + "\n", + " color = plt.cm.tab10(i % 10)[:3]\n", + " mask_overlay[mask_np > 0.5] = (*color, 0.5)\n", + "\n", + " x0, y0, x1, y1 = box.cpu().numpy()\n", + " rect = Rectangle(\n", + " (x0, y0),\n", + " x1 - x0,\n", + " y1 - y0,\n", + " fill=False,\n", + " edgecolor=color,\n", + " linewidth=2,\n", + " )\n", + " self.ax.add_patch(rect)\n", + "\n", + " self.ax.text(\n", + " x0,\n", + " y0 - 5,\n", + " f\"{score:.2f}\",\n", + " color=\"white\",\n", + " fontsize=10,\n", + " bbox=dict(\n", + " facecolor=color, alpha=0.7, edgecolor=\"none\", pad=2\n", + " ),\n", + " )\n", + "\n", + " self.ax.imshow(mask_overlay)\n", + " self.status_label.value = f\"Found {len(masks)} object(s)\"\n", + " else:\n", + " self.status_label.value 
= (\n", + " \"No objects found above confidence threshold\"\n", + " )\n", + "\n", + " # Display prompted boxes with dashed lines\n", + " if self.state is not None and \"prompted_boxes\" in self.state:\n", + " for prompted_box in self.state[\"prompted_boxes\"]:\n", + " box_coords = prompted_box[\"box\"]\n", + " is_positive = prompted_box[\"label\"]\n", + "\n", + " x0, y0, x1, y1 = box_coords\n", + " color = \"green\" if is_positive else \"red\"\n", + "\n", + " rect = Rectangle(\n", + " (x0, y0),\n", + " x1 - x0,\n", + " y1 - y0,\n", + " fill=False,\n", + " edgecolor=color,\n", + " linewidth=2,\n", + " linestyle=\"--\",\n", + " )\n", + " self.ax.add_patch(rect)\n", + "\n", + " # display(self.fig.canvas)\n", + "\n", + " def display(self):\n", + " display(self.container)\n", + "\n", + " # Add this for more convenient display in notebooks\n", + " def _ipython_display_(self):\n", + " self.display()\n" + ] + }, + { + "cell_type": "markdown", + "id": "1b9bda74-b455-4957-9767-2a46a041b50f", + "metadata": {}, + "source": [ + "# Run!" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "ebfb9b85-2318-4328-bb0e-e93e4a57fefe", + "metadata": {}, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "ea0e04a1bfd7486b93baae650d87e0b2", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "VBox(children=(HTML(value='\\n