ChipYTY commited on
Commit
bf950d2
·
verified ·
1 Parent(s): e5e4b98

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. source_code/sam3/assets/veval/toy_gt_and_pred/toy_saco_veval_sav_test_eval_res.json +1 -0
  2. source_code/sam3/examples/saco_gold_silver_eval_example.ipynb +0 -0
  3. source_code/sam3/examples/saco_veval_vis_example.ipynb +269 -0
  4. source_code/sam3/examples/sam3_for_sam1_task_example.ipynb +846 -0
  5. source_code/sam3/examples/sam3_image_interactive.ipynb +757 -0
  6. source_code/sam3/examples/sam3_image_predictor_example.ipynb +0 -0
  7. source_code/sam3/medsam3_brats/train_sam3_video_lora_ddp.py +689 -0
  8. source_code/sam3/sam3.egg-info/SOURCES.txt +51 -0
  9. source_code/sam3/sam3/agent/__init__.py +1 -0
  10. source_code/sam3/sam3/agent/client_llm.py +205 -0
  11. source_code/sam3/sam3/agent/client_sam3.py +138 -0
  12. source_code/sam3/sam3/agent/helpers/__init__.py +1 -0
  13. source_code/sam3/sam3/agent/helpers/memory.py +87 -0
  14. source_code/sam3/sam3/agent/system_prompts/system_prompt.txt +242 -0
  15. source_code/sam3/sam3/agent/system_prompts/system_prompt_iterative_checking.txt +26 -0
  16. source_code/sam3/sam3/eval/cgf1_eval.py +703 -0
  17. source_code/sam3/sam3/eval/conversion_util.py +211 -0
  18. source_code/sam3/sam3/eval/hota_eval_toolkit/run_ytvis_eval.py +114 -0
  19. source_code/sam3/sam3/eval/hota_eval_toolkit/trackeval/datasets/_base_dataset.py +379 -0
  20. source_code/sam3/sam3/eval/hota_eval_toolkit/trackeval/metrics/__init__.py +4 -0
  21. source_code/sam3/sam3/eval/postprocessors.py +648 -0
  22. source_code/sam3/sam3/eval/saco_veval_evaluators.py +838 -0
  23. source_code/sam3/sam3/eval/teta_eval_toolkit/_timing.py +69 -0
  24. source_code/sam3/sam3/eval/teta_eval_toolkit/datasets/tao.py +659 -0
  25. source_code/sam3/sam3/eval/teta_eval_toolkit/eval.py +275 -0
  26. source_code/sam3/sam3/eval/teta_eval_toolkit/metrics/_base_metric.py +148 -0
  27. source_code/sam3/sam3/eval/ytvis_eval.py +411 -0
  28. source_code/sam3/sam3/model/act_ckpt_utils.py +114 -0
  29. source_code/sam3/sam3/model/encoder.py +594 -0
  30. source_code/sam3/sam3/model/maskformer_segmentation.py +323 -0
  31. source_code/sam3/sam3/model/sam1_task_predictor.py +458 -0
  32. source_code/sam3/sam3/model/sam3_image_processor.py +222 -0
  33. source_code/sam3/sam3/model/sam3_tracker_utils.py +427 -0
  34. source_code/sam3/sam3/model/sam3_tracking_predictor.py +1368 -0
  35. source_code/sam3/sam3/model/sam3_video_inference.py +1709 -0
  36. source_code/sam3/sam3/model_builder.py +793 -0
  37. source_code/sam3/sam3/perflib/__init__.py +8 -0
  38. source_code/sam3/sam3/perflib/compile.py +99 -0
  39. source_code/sam3/sam3/perflib/fa3.py +27 -0
  40. source_code/sam3/sam3/perflib/masks_ops.py +69 -0
  41. source_code/sam3/sam3/perflib/nms.py +91 -0
  42. source_code/sam3/sam3/perflib/tests/tests.py +59 -0
  43. source_code/sam3/sam3/perflib/triton/connected_components.py +468 -0
  44. source_code/sam3/sam3/sam/prompt_encoder.py +243 -0
  45. source_code/sam3/sam3/train/configs/eval_base.yaml +279 -0
  46. source_code/sam3/sam3/train/configs/gold_image_evals/sam3_gold_image_attributes.yaml +66 -0
  47. source_code/sam3/sam3/train/configs/gold_image_evals/sam3_gold_image_metaclip_nps.yaml +66 -0
  48. source_code/sam3/sam3/train/configs/odinw13/odinw_text_only_positive.yaml +253 -0
  49. source_code/sam3/sam3/train/configs/odinw13/odinw_visual_only.yaml +256 -0
  50. source_code/sam3/sam3/train/configs/roboflow_v100/roboflow_v100_full_ft_100_images.yaml +539 -0
source_code/sam3/assets/veval/toy_gt_and_pred/toy_saco_veval_sav_test_eval_res.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"dataset_results": {"video_bbox_mAP_50_95": 0.25926449787835926, "video_mask_mAP_50_95": 0.34191419141914187, "video_bbox_phrase_ap_50_95": 0.1371423984673839, "video_bbox_phrase_ap_50": 0.3000915217572178, "video_bbox_phrase_ap_75": 0.08651579443658651, "video_mask_phrase_ap_50_95": 0.1935287184180603, "video_mask_phrase_ap_50": 0.3513242920930748, "video_mask_phrase_ap_75": 0.18776520509193778, "video_mask_teta": 50.647, "video_mask_loc_a": 50.969, "video_mask_assoc_a": 49.191, "video_mask_cls_a": 51.781, "video_mask_loc_re": 53.646, "video_mask_loc_pr": 59.689, "video_mask_assoc_re": 53.932, "video_mask_assoc_pr": 55.295, "video_mask_cls_re": 69.966, "video_mask_cls_pr": 51.792, "video_bbox_all_phrase_HOTA": 0.45019177261810006, "video_bbox_all_phrase_DetA": 0.3065446597162029, "video_bbox_all_phrase_AssA": 0.6620514646913795, "video_bbox_all_phrase_DetRe": 0.34337909467405614, "video_bbox_all_phrase_DetPr": 0.672412162481676, "video_bbox_all_phrase_AssRe": 0.7418235522588219, "video_bbox_all_phrase_AssPr": 0.7588797497822176, "video_bbox_all_phrase_LocA": 0.8264969431924951, "video_bbox_all_phrase_OWTA": 0.4767458776352726, "video_mask_all_phrase_HOTA": 0.4656079602771566, "video_mask_all_phrase_DetA": 0.31732796518831874, "video_mask_all_phrase_AssA": 0.6843555199075559, "video_mask_all_phrase_DetRe": 0.35291773825479045, "video_mask_all_phrase_DetPr": 0.6910909348843808, "video_mask_all_phrase_AssRe": 0.7573349011890161, "video_mask_all_phrase_AssPr": 0.7716315286320553, "video_mask_all_phrase_LocA": 0.8328852506118952, "video_mask_all_phrase_OWTA": 0.4913272309590218, "video_bbox_demo_precision_50_95": 0.3349983250083749, "video_bbox_demo_recall_50_95": 0.20302968778882485, "video_bbox_demo_f1_50_95": 0.2527916520815702, "video_bbox_demo_precision_50": 0.6499967500162499, "video_bbox_demo_recall_50": 0.3939382001872721, "video_bbox_demo_f1_50": 0.49051719921648634, "video_bbox_demo_precision_75": 0.29999850000749995, "video_bbox_demo_recall_75": 
0.18181763085566405, "video_bbox_demo_f1_75": 0.2263672578625684, "video_bbox_demo_pmf1_50_95": 0.34412433298399697, "video_bbox_demo_ilmcc_50_95": 0.6546535992794136, "video_bbox_demo_cgf1_50_95": 0.22528223318760104, "video_bbox_demo_pmf1_w0dt_50_95": 0.29496371398628307, "video_bbox_demo_cgf1_w0dt_50_95": 0.19309905701794375, "video_bbox_demo_positive_micro_f1_50_95": 0.2527916520815702, "video_bbox_demo_cgf1_micro_50_95": 0.1654909649029892, "video_bbox_demo_pmf1_50": 0.6499302549834214, "video_bbox_demo_ilmcc_50": 0.6546535992794136, "video_bbox_demo_cgf1_50": 0.42547918070548385, "video_bbox_demo_positive_micro_f1_50": 0.49051719921648634, "video_bbox_demo_cgf1_micro_50": 0.3211188499755299, "video_bbox_demo_pmf1_75": 0.3666121729955099, "video_bbox_demo_ilmcc_75": 0.6546535992794136, "video_bbox_demo_cgf1_75": 0.2400039785911576, "video_bbox_demo_positive_micro_f1_75": 0.2263672578625684, "video_bbox_demo_cgf1_micro_75": 0.14819214011874154, "video_mask_demo_precision_50_95": 0.4049979750101249, "video_mask_demo_recall_50_95": 0.24545380165514646, "video_mask_demo_f1_50_95": 0.30562163642146994, "video_mask_demo_precision_50": 0.6999965000174999, "video_mask_demo_recall_50": 0.4242411386632161, "video_mask_demo_f1_50": 0.5282529055527269, "video_mask_demo_precision_75": 0.39999800000999997, "video_mask_demo_recall_75": 0.24242350780755206, "video_mask_demo_f1_75": 0.3018386687406093, "video_mask_demo_pmf1_50_95": 0.42578319170459134, "video_mask_demo_ilmcc_50_95": 0.6546535992794136, "video_mask_demo_cgf1_50_95": 0.2787404989620873, "video_mask_demo_pmf1_w0dt_50_95": 0.3649570214610783, "video_mask_demo_cgf1_w0dt_50_95": 0.23892042768178906, "video_mask_demo_positive_micro_f1_50_95": 0.30562163642146994, "video_mask_demo_cgf1_micro_50_95": 0.2000763043009796, "video_mask_demo_pmf1_50": 0.691595879731724, "video_mask_demo_ilmcc_50": 0.6546535992794136, "video_mask_demo_cgf1_50": 0.4527557319131855, "video_mask_demo_positive_micro_f1_50": 0.5282529055527269, 
"video_mask_demo_cgf1_micro_50": 0.34582266594990074, "video_mask_demo_pmf1_75": 0.44161046360299766, "video_mask_demo_ilmcc_75": 0.6546535992794136, "video_mask_demo_cgf1_75": 0.2891018794771529, "video_mask_demo_positive_micro_f1_75": 0.3018386687406093, "video_mask_demo_cgf1_micro_75": 0.19759977089274647}, "video_np_results": [{"video_id": 0, "category_id": 847, "bbox_HOTA": 0.5142922923345529, "bbox_DetA": 0.4728133947307452, "bbox_AssA": 0.5594614107579609, "bbox_DetRe": 0.5232999318999904, "bbox_DetPr": 0.6740601503759398, "bbox_AssRe": 0.6250872094372617, "bbox_AssPr": 0.6825128274969564, "bbox_LocA": 0.7656278351108177, "bbox_OWTA": 0.5404796613713161, "mask_HOTA": 0.5531074184495137, "mask_DetA": 0.5015830888530848, "mask_AssA": 0.6104397175171865, "mask_DetRe": 0.5479132211304601, "mask_DetPr": 0.7057644110275689, "mask_AssRe": 0.6703397116425474, "mask_AssPr": 0.7231025840584305, "mask_LocA": 0.7775805526027232, "mask_OWTA": 0.5779237956098605, "bbox_TP_50_95": 0.8, "bbox_FP_50_95": 3.2, "bbox_FN_50_95": 3.2, "bbox_F1_50_95": 0.2, "bbox_TP_50": 2.0, "bbox_FP_50": 2.0, "bbox_FN_50": 2.0, "bbox_F1_50": 0.5, "bbox_TP_75": 0.0, "bbox_FP_75": 4.0, "bbox_FN_75": 4.0, "bbox_F1_75": 0.0, "mask_TP_50_95": 1.4, "mask_FP_50_95": 2.6, "mask_FN_50_95": 2.6, "mask_F1_50_95": 0.35, "mask_TP_50": 3.0, "mask_FP_50": 1.0, "mask_FN_50": 1.0, "mask_F1_50": 0.75, "mask_TP_75": 0.0, "mask_FP_75": 4.0, "mask_FN_75": 4.0, "mask_F1_75": 0.0}, {"video_id": 0, "category_id": 1390, "bbox_HOTA": 0.7406733177321637, "bbox_DetA": 0.7406733177321637, "bbox_AssA": 0.7406733177321637, "bbox_DetRe": 0.8079854809437387, "bbox_DetPr": 0.8428625520636122, "bbox_AssRe": 0.8079854809437387, "bbox_AssPr": 0.8428625520636122, "bbox_LocA": 0.8823614751992302, "bbox_OWTA": 0.7727325543244231, "mask_HOTA": 0.8154208948182305, "mask_DetA": 0.8154208948182305, "mask_AssA": 0.8154208948182304, "mask_DetRe": 0.8508166969147005, "mask_DetPr": 0.8875425975009466, "mask_AssRe": 0.8508166969147005, 
"mask_AssPr": 0.8875425975009466, "mask_LocA": 0.8885850303003239, "mask_OWTA": 0.8323023646534773, "bbox_TP_50_95": 0.6, "bbox_FP_50_95": 0.4, "bbox_FN_50_95": 0.4, "bbox_F1_50_95": 0.6, "bbox_TP_50": 1.0, "bbox_FP_50": 0.0, "bbox_FN_50": 0.0, "bbox_F1_50": 1.0, "bbox_TP_75": 1.0, "bbox_FP_75": 0.0, "bbox_FN_75": 0.0, "bbox_F1_75": 1.0, "mask_TP_50_95": 0.8, "mask_FP_50_95": 0.2, "mask_FN_50_95": 0.2, "mask_F1_50_95": 0.8, "mask_TP_50": 1.0, "mask_FP_50": 0.0, "mask_FN_50": 0.0, "mask_F1_50": 1.0, "mask_TP_75": 1.0, "mask_FP_75": 0.0, "mask_FN_75": 0.0, "mask_F1_75": 1.0}, {"video_id": 0, "category_id": 1985, "bbox_HOTA": 0.6033247555682881, "bbox_DetA": 0.582364339356503, "bbox_AssA": 0.6261229142494006, "bbox_DetRe": 0.731940716536352, "bbox_DetPr": 0.6764452113891285, "bbox_AssRe": 0.7595503987929015, "bbox_AssPr": 0.7140399793949845, "bbox_LocA": 0.8359470620317413, "bbox_OWTA": 0.6767785755234768, "mask_HOTA": 0.6327138779062043, "mask_DetA": 0.607379512629533, "mask_AssA": 0.661378852549867, "mask_DetRe": 0.7515462714435757, "mask_DetPr": 0.6945642795513374, "mask_AssRe": 0.7849752407175019, "mask_AssPr": 0.7406062552585059, "mask_LocA": 0.8390964342806022, "mask_OWTA": 0.70480310037791, "bbox_TP_50_95": 1.5, "bbox_FP_50_95": 2.5, "bbox_FN_50_95": 2.5, "bbox_F1_50_95": 0.375, "bbox_TP_50": 4.0, "bbox_FP_50": 0.0, "bbox_FN_50": 0.0, "bbox_F1_50": 1.0, "bbox_TP_75": 1.0, "bbox_FP_75": 3.0, "bbox_FN_75": 3.0, "bbox_F1_75": 0.25, "mask_TP_50_95": 1.7, "mask_FP_50_95": 2.3, "mask_FN_50_95": 2.3, "mask_F1_50_95": 0.425, "mask_TP_50": 4.0, "mask_FP_50": 0.0, "mask_FN_50": 0.0, "mask_F1_50": 1.0, "mask_TP_75": 1.0, "mask_FP_75": 3.0, "mask_FN_75": 3.0, "mask_F1_75": 0.25}, {"video_id": 0, "category_id": 3802, "bbox_HOTA": 0.47649622572399936, "bbox_DetA": 0.433826727090632, "bbox_AssA": 0.5248504404498464, "bbox_DetRe": 0.6931060044477392, "bbox_DetPr": 0.49540798304486033, "bbox_AssRe": 0.5970635788657445, "bbox_AssPr": 0.6521277350832161, "bbox_LocA": 
0.8179341001135264, "bbox_OWTA": 0.6029168695170898, "mask_HOTA": 0.48643902599730804, "mask_DetA": 0.45408850965718345, "mask_AssA": 0.5231400608341074, "mask_DetRe": 0.7197924388435877, "mask_DetPr": 0.5144825150123632, "mask_AssRe": 0.5970712271676157, "mask_AssPr": 0.6408536817832409, "mask_LocA": 0.8335384601792232, "mask_OWTA": 0.613243699873648, "bbox_TP_50_95": 1.2, "bbox_FP_50_95": 4.8, "bbox_FN_50_95": 2.8, "bbox_F1_50_95": 0.24000000000000005, "bbox_TP_50": 2.0, "bbox_FP_50": 4.0, "bbox_FN_50": 2.0, "bbox_F1_50": 0.4, "bbox_TP_75": 1.0, "bbox_FP_75": 5.0, "bbox_FN_75": 3.0, "bbox_F1_75": 0.2, "mask_TP_50_95": 1.4, "mask_FP_50_95": 4.6, "mask_FN_50_95": 2.6, "mask_F1_50_95": 0.28, "mask_TP_50": 2.0, "mask_FP_50": 4.0, "mask_FN_50": 2.0, "mask_F1_50": 0.4, "mask_TP_75": 2.0, "mask_FP_75": 4.0, "mask_FN_75": 2.0, "mask_F1_75": 0.4}, {"video_id": 0, "category_id": 3827, "bbox_HOTA": 0.7782356487108129, "bbox_DetA": 0.7707506578022777, "bbox_AssA": 0.7864050337062197, "bbox_DetRe": 0.8251451746704767, "bbox_DetPr": 0.8309663046505151, "bbox_AssRe": 0.8370220441715713, "bbox_AssPr": 0.8427028310094968, "bbox_LocA": 0.8515357797457908, "bbox_OWTA": 0.8052026625060156, "mask_HOTA": 0.7841937402779418, "mask_DetA": 0.7773711399985175, "mask_AssA": 0.7919510612320402, "mask_DetRe": 0.8283712784588442, "mask_DetPr": 0.8342151675485008, "mask_AssRe": 0.8407743682610512, "mask_AssPr": 0.845985151807301, "mask_LocA": 0.851867149412845, "mask_OWTA": 0.8096925025911031, "bbox_TP_50_95": 2.6, "bbox_FP_50_95": 1.4, "bbox_FN_50_95": 1.4, "bbox_F1_50_95": 0.65, "bbox_TP_50": 4.0, "bbox_FP_50": 0.0, "bbox_FN_50": 0.0, "bbox_F1_50": 1.0, "bbox_TP_75": 3.0, "bbox_FP_75": 1.0, "bbox_FN_75": 1.0, "bbox_F1_75": 0.75, "mask_TP_50_95": 2.8, "mask_FP_50_95": 1.2, "mask_FN_50_95": 1.2, "mask_F1_50_95": 0.7, "mask_TP_50": 4.0, "mask_FP_50": 0.0, "mask_FN_50": 0.0, "mask_F1_50": 1.0, "mask_TP_75": 4.0, "mask_FP_75": 0.0, "mask_FN_75": 0.0, "mask_F1_75": 1.0}, {"video_id": 0, 
"category_id": 49272, "bbox_HOTA": 0.0, "bbox_DetA": 0.0, "bbox_AssA": 0.0, "bbox_DetRe": 0.0, "bbox_DetPr": 0.0, "bbox_AssRe": 0.0, "bbox_AssPr": 0.0, "bbox_LocA": 1.0, "bbox_OWTA": 0.0, "mask_HOTA": 0.0, "mask_DetA": 0.0, "mask_AssA": 0.0, "mask_DetRe": 0.0, "mask_DetPr": 0.0, "mask_AssRe": 0.0, "mask_AssPr": 0.0, "mask_LocA": 1.0, "mask_OWTA": 0.0, "bbox_TP_50_95": 0.0, "bbox_FP_50_95": 0.0, "bbox_FN_50_95": 12.0, "bbox_F1_50_95": 0.0, "bbox_TP_50": 0.0, "bbox_FP_50": 0.0, "bbox_FN_50": 12.0, "bbox_F1_50": 0.0, "bbox_TP_75": 0.0, "bbox_FP_75": 0.0, "bbox_FN_75": 12.0, "bbox_F1_75": 0.0, "mask_TP_50_95": 0.0, "mask_FP_50_95": 0.0, "mask_FN_50_95": 12.0, "mask_F1_50_95": 0.0, "mask_TP_50": 0.0, "mask_FP_50": 0.0, "mask_FN_50": 12.0, "mask_F1_50": 0.0, "mask_TP_75": 0.0, "mask_FP_75": 0.0, "mask_FN_75": 12.0, "mask_F1_75": 0.0}, {"video_id": 0, "category_id": 49504, "bbox_HOTA": 0.0, "bbox_DetA": 0.0, "bbox_AssA": 0.0, "bbox_DetRe": 0.0, "bbox_DetPr": 0.0, "bbox_AssRe": 0.0, "bbox_AssPr": 0.0, "bbox_LocA": 1.0, "bbox_OWTA": 0.0, "mask_HOTA": 0.0, "mask_DetA": 0.0, "mask_AssA": 0.0, "mask_DetRe": 0.0, "mask_DetPr": 0.0, "mask_AssRe": 0.0, "mask_AssPr": 0.0, "mask_LocA": 1.0, "mask_OWTA": 0.0, "bbox_TP_50_95": 0.0, "bbox_FP_50_95": 1.0, "bbox_FN_50_95": 4.0, "bbox_F1_50_95": 0.0, "bbox_TP_50": 0.0, "bbox_FP_50": 1.0, "bbox_FN_50": 4.0, "bbox_F1_50": 0.0, "bbox_TP_75": 0.0, "bbox_FP_75": 1.0, "bbox_FN_75": 4.0, "bbox_F1_75": 0.0, "mask_TP_50_95": 0.0, "mask_FP_50_95": 1.0, "mask_FN_50_95": 4.0, "mask_F1_50_95": 0.0, "mask_TP_50": 0.0, "mask_FP_50": 1.0, "mask_FN_50": 4.0, "mask_F1_50": 0.0, "mask_TP_75": 0.0, "mask_FP_75": 1.0, "mask_FN_75": 4.0, "mask_F1_75": 0.0}, {"video_id": 0, "category_id": 50554, "bbox_TP_50_95": 0.0, "bbox_FP_50_95": 0.0, "bbox_FN_50_95": 0.0, "bbox_F1_50_95": 1.0, "bbox_TP_50": 0.0, "bbox_FP_50": 0.0, "bbox_FN_50": 0.0, "bbox_F1_50": 1.0, "bbox_TP_75": 0.0, "bbox_FP_75": 0.0, "bbox_FN_75": 0.0, "bbox_F1_75": 1.0, "mask_TP_50_95": 0.0, 
"mask_FP_50_95": 0.0, "mask_FN_50_95": 0.0, "mask_F1_50_95": 1.0, "mask_TP_50": 0.0, "mask_FP_50": 0.0, "mask_FN_50": 0.0, "mask_F1_50": 1.0, "mask_TP_75": 0.0, "mask_FP_75": 0.0, "mask_FN_75": 0.0, "mask_F1_75": 1.0}]}
source_code/sam3/examples/saco_gold_silver_eval_example.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
source_code/sam3/examples/saco_veval_vis_example.ipynb ADDED
@@ -0,0 +1,269 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": null,
6
+ "id": "37048f21",
7
+ "metadata": {},
8
+ "outputs": [],
9
+ "source": [
10
+ "# Copyright (c) Meta Platforms, Inc. and affiliates."
11
+ ]
12
+ },
13
+ {
14
+ "cell_type": "code",
15
+ "execution_count": null,
16
+ "id": "154d8663",
17
+ "metadata": {},
18
+ "outputs": [],
19
+ "source": [
20
+ "using_colab = False"
21
+ ]
22
+ },
23
+ {
24
+ "cell_type": "code",
25
+ "execution_count": null,
26
+ "id": "b85d99d9",
27
+ "metadata": {},
28
+ "outputs": [],
29
+ "source": [
30
+ "if using_colab:\n",
31
+ " import torch\n",
32
+ " import torchvision\n",
33
+ " print(\"PyTorch version:\", torch.__version__)\n",
34
+ " print(\"Torchvision version:\", torchvision.__version__)\n",
35
+ " print(\"CUDA is available:\", torch.cuda.is_available())\n",
36
+ " import sys\n",
37
+ " !{sys.executable} -m pip install opencv-python matplotlib scikit-learn\n",
38
+ " !{sys.executable} -m pip install 'git+https://github.com/facebookresearch/sam3.git'"
39
+ ]
40
+ },
41
+ {
42
+ "cell_type": "code",
43
+ "execution_count": null,
44
+ "id": "da21a3bc",
45
+ "metadata": {},
46
+ "outputs": [],
47
+ "source": [
48
+ "import os\n",
49
+ "from glob import glob\n",
50
+ "\n",
51
+ "import numpy as np\n",
52
+ "import utils\n",
53
+ "\n",
54
+ "from matplotlib import pyplot as plt\n",
55
+ "\n",
56
+ "COLORS = utils.pascal_color_map()[1:]"
57
+ ]
58
+ },
59
+ {
60
+ "cell_type": "markdown",
61
+ "id": "57e85e7e",
62
+ "metadata": {},
63
+ "source": [
64
+ "1. Load the data"
65
+ ]
66
+ },
67
+ {
68
+ "cell_type": "code",
69
+ "execution_count": null,
70
+ "id": "a796734e",
71
+ "metadata": {},
72
+ "outputs": [],
73
+ "source": [
74
+ "# Preapre the data path\n",
75
+ "DATA_DIR = \"./sam3_saco_veval_data\" # PUT YOUR DATA PATH HERE\n",
76
+ "ANNOT_DIR = os.path.join(DATA_DIR, \"annotation\")\n",
77
+ "\n",
78
+ "# Load the SACO/Veval annotation files\n",
79
+ "annot_file_list = glob(os.path.join(ANNOT_DIR, \"*veval*.json\"))\n",
80
+ "annot_dfs = utils.get_annot_dfs(file_list=annot_file_list)"
81
+ ]
82
+ },
83
+ {
84
+ "cell_type": "markdown",
85
+ "id": "74bf92b1",
86
+ "metadata": {},
87
+ "source": [
88
+ "Show the annotation files being loaded"
89
+ ]
90
+ },
91
+ {
92
+ "cell_type": "code",
93
+ "execution_count": null,
94
+ "id": "a95620ec",
95
+ "metadata": {},
96
+ "outputs": [],
97
+ "source": [
98
+ "annot_dfs.keys()"
99
+ ]
100
+ },
101
+ {
102
+ "cell_type": "markdown",
103
+ "id": "5ce211d3",
104
+ "metadata": {},
105
+ "source": [
106
+ "2. Examples of the data format"
107
+ ]
108
+ },
109
+ {
110
+ "cell_type": "code",
111
+ "execution_count": null,
112
+ "id": "6ba749db",
113
+ "metadata": {},
114
+ "outputs": [],
115
+ "source": [
116
+ "annot_dfs[\"saco_veval_yt1b_val\"].keys()"
117
+ ]
118
+ },
119
+ {
120
+ "cell_type": "code",
121
+ "execution_count": null,
122
+ "id": "4b6dc186",
123
+ "metadata": {},
124
+ "outputs": [],
125
+ "source": [
126
+ "annot_dfs[\"saco_veval_yt1b_val\"][\"info\"]"
127
+ ]
128
+ },
129
+ {
130
+ "cell_type": "code",
131
+ "execution_count": null,
132
+ "id": "c41091b3",
133
+ "metadata": {},
134
+ "outputs": [],
135
+ "source": [
136
+ "annot_dfs[\"saco_veval_yt1b_val\"][\"videos\"].head(3)"
137
+ ]
138
+ },
139
+ {
140
+ "cell_type": "code",
141
+ "execution_count": null,
142
+ "id": "a7df5771",
143
+ "metadata": {},
144
+ "outputs": [],
145
+ "source": [
146
+ "annot_dfs[\"saco_veval_yt1b_val\"][\"annotations\"].head(3)"
147
+ ]
148
+ },
149
+ {
150
+ "cell_type": "code",
151
+ "execution_count": null,
152
+ "id": "24d2861c",
153
+ "metadata": {},
154
+ "outputs": [],
155
+ "source": [
156
+ "annot_dfs[\"saco_veval_yt1b_val\"][\"categories\"].head(3)"
157
+ ]
158
+ },
159
+ {
160
+ "cell_type": "code",
161
+ "execution_count": null,
162
+ "id": "f9f98f27",
163
+ "metadata": {},
164
+ "outputs": [],
165
+ "source": [
166
+ "annot_dfs[\"saco_veval_yt1b_val\"][\"video_np_pairs\"].head(3)"
167
+ ]
168
+ },
169
+ {
170
+ "cell_type": "markdown",
171
+ "id": "5673a63f",
172
+ "metadata": {},
173
+ "source": [
174
+ "3. Visualize the data"
175
+ ]
176
+ },
177
+ {
178
+ "cell_type": "code",
179
+ "execution_count": null,
180
+ "id": "da827d09",
181
+ "metadata": {},
182
+ "outputs": [],
183
+ "source": [
184
+ "# Select a target dataset\n",
185
+ "target_dataset_name = \"saco_veval_yt1b_val\"\n",
186
+ "\n",
187
+ "# visualize a random positive video-np pair\n",
188
+ "df_pairs = annot_dfs[target_dataset_name][\"video_np_pairs\"]\n",
189
+ "df_positive_pairs = df_pairs[df_pairs.num_masklets > 0]\n",
190
+ "rand_idx = np.random.randint(len(df_positive_pairs))\n",
191
+ "pair_row = df_positive_pairs.iloc[rand_idx]\n",
192
+ "video_id = pair_row.video_id\n",
193
+ "noun_phrase = pair_row.noun_phrase\n",
194
+ "print(f\"Randomly selected video-np pair: video_id={video_id}, noun_phrase={noun_phrase}\")\n",
195
+ "\n",
196
+ "def display_image_in_subplot(img, axes, row, col, title=\"\"):\n",
197
+ " axes[row, col].imshow(img)\n",
198
+ " axes[row, col].set_title(title)\n",
199
+ " axes[row, col].axis('off')\n",
200
+ "\n",
201
+ "num_frames_to_show = 5 # Number of frames to show per dataset\n",
202
+ "every_n_frames = 4 # Interval between frames to show\n",
203
+ "\n",
204
+ "fig, axes = plt.subplots(num_frames_to_show, 3, figsize=(15, 5 * num_frames_to_show))\n",
205
+ "\n",
206
+ "for idx in range(0, num_frames_to_show):\n",
207
+ " sampled_frame_idx = idx * every_n_frames\n",
208
+ " print(f\"Reading annotations for frame {sampled_frame_idx}\")\n",
209
+ " # Get the frame and the corresponding masks and noun phrases\n",
210
+ " frame, annot_masks, annot_noun_phrases = utils.get_all_annotations_for_frame(\n",
211
+ " annot_dfs[target_dataset_name], video_id=video_id, frame_idx=sampled_frame_idx, data_dir=DATA_DIR, dataset=target_dataset_name\n",
212
+ " )\n",
213
+ " # Filter masks and noun phrases by the selected noun phrase\n",
214
+ " annot_masks = [m for m, np in zip(annot_masks, annot_noun_phrases) if np == noun_phrase]\n",
215
+ "\n",
216
+ " # Show the frame\n",
217
+ " display_image_in_subplot(frame, axes, idx, 0, f\"{target_dataset_name} - {noun_phrase} - Frame {sampled_frame_idx}\")\n",
218
+ "\n",
219
+ " # Show the annotated masks\n",
220
+ " if annot_masks is None:\n",
221
+ " print(f\"No masks found for video_id {video_id} at frame {sampled_frame_idx}\")\n",
222
+ " else:\n",
223
+ " # Show all masks over a white background\n",
224
+ " all_masks = utils.draw_masks_to_frame(\n",
225
+ " frame=np.ones_like(frame)*255, masks=annot_masks, colors=COLORS[: len(annot_masks)]\n",
226
+ " )\n",
227
+ " display_image_in_subplot(all_masks, axes, idx, 1, f\"{target_dataset_name} - {noun_phrase} - Frame {sampled_frame_idx} - Masks\")\n",
228
+ " \n",
229
+ " # Show masks overlaid on the frame\n",
230
+ " masked_frame = utils.draw_masks_to_frame(\n",
231
+ " frame=frame, masks=annot_masks, colors=COLORS[: len(annot_masks)]\n",
232
+ " )\n",
233
+ " display_image_in_subplot(masked_frame, axes, idx, 2, f\"Dataset: {target_dataset_name} - {noun_phrase} - Frame {sampled_frame_idx} - Masks overlaid\")\n",
234
+ "\n",
235
+ "plt.tight_layout()\n",
236
+ "plt.show()"
237
+ ]
238
+ },
239
+ {
240
+ "cell_type": "code",
241
+ "execution_count": null,
242
+ "id": "a2a23152",
243
+ "metadata": {},
244
+ "outputs": [],
245
+ "source": []
246
+ }
247
+ ],
248
+ "metadata": {
249
+ "kernelspec": {
250
+ "display_name": "Python 3 (ipykernel)",
251
+ "language": "python",
252
+ "name": "python3"
253
+ },
254
+ "language_info": {
255
+ "codemirror_mode": {
256
+ "name": "ipython",
257
+ "version": 3
258
+ },
259
+ "file_extension": ".py",
260
+ "mimetype": "text/x-python",
261
+ "name": "python",
262
+ "nbconvert_exporter": "python",
263
+ "pygments_lexer": "ipython3",
264
+ "version": "3.10.13"
265
+ }
266
+ },
267
+ "nbformat": 4,
268
+ "nbformat_minor": 5
269
+ }
source_code/sam3/examples/sam3_for_sam1_task_example.ipynb ADDED
@@ -0,0 +1,846 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": null,
6
+ "id": "f400486b",
7
+ "metadata": {},
8
+ "outputs": [],
9
+ "source": [
10
+ "# Copyright (c) Meta Platforms, Inc. and affiliates."
11
+ ]
12
+ },
13
+ {
14
+ "cell_type": "markdown",
15
+ "id": "a1ae39ff",
16
+ "metadata": {
17
+ "jp-MarkdownHeadingCollapsed": true
18
+ },
19
+ "source": [
20
+ "# Interactive Instance Segmentation using SAM 3"
21
+ ]
22
+ },
23
+ {
24
+ "cell_type": "markdown",
25
+ "id": "b4a4b25c",
26
+ "metadata": {},
27
+ "source": [
28
+ "Segment Anything Model 3 (SAM 3) predicts instance masks that indicate the desired object given geometric prompts (SAM 1 task).\n",
29
+ "The `SAM3Image` and `Sam3Processor` classes provide an easy interface to prompt the model. The user first sets an image using the `Sam3Processor.set_image` method, which computes the necessary image embeddings. Then, prompts can be provided via the `predict` method to efficiently predict masks from those prompts. The model can take as input both point and box prompts, as well as masks from the previous iteration of prediction.\n",
30
+ "\n",
31
+ "This notebook follows the SAM 2 API for interactive image segmentation.\n",
32
+ "\n",
33
+ "# <a target=\"_blank\" href=\"https://colab.research.google.com/github/facebookresearch/sam3/blob/main/notebooks/sam3_for_sam1_task_example.ipynb\">\n",
34
+ "# <img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/>\n",
35
+ "# </a>\n"
36
+ ]
37
+ },
38
+ {
39
+ "cell_type": "markdown",
40
+ "id": "644532a8",
41
+ "metadata": {},
42
+ "source": [
43
+ "## Environment Set-up"
44
+ ]
45
+ },
46
+ {
47
+ "cell_type": "markdown",
48
+ "id": "07fabfee",
49
+ "metadata": {},
50
+ "source": [
51
+ "First install `sam3` in your environment using the [installation instructions](https://github.com/facebookresearch/sam3?tab=readme-ov-file#installation) in the repository."
52
+ ]
53
+ },
54
+ {
55
+ "cell_type": "markdown",
56
+ "id": "0be845da",
57
+ "metadata": {},
58
+ "source": [
59
+ "## Set-up"
60
+ ]
61
+ },
62
+ {
63
+ "cell_type": "markdown",
64
+ "id": "33681dd1",
65
+ "metadata": {},
66
+ "source": [
67
+ "Necessary imports and helper functions for displaying points, boxes, and masks."
68
+ ]
69
+ },
70
+ {
71
+ "cell_type": "code",
72
+ "execution_count": null,
73
+ "id": "fe773ede",
74
+ "metadata": {},
75
+ "outputs": [],
76
+ "source": [
77
+ "using_colab = False"
78
+ ]
79
+ },
80
+ {
81
+ "cell_type": "code",
82
+ "execution_count": null,
83
+ "id": "79250a4e",
84
+ "metadata": {},
85
+ "outputs": [],
86
+ "source": [
87
+ "if using_colab:\n",
88
+ " import torch\n",
89
+ " import torchvision\n",
90
+ " print(\"PyTorch version:\", torch.__version__)\n",
91
+ " print(\"Torchvision version:\", torchvision.__version__)\n",
92
+ " print(\"CUDA is available:\", torch.cuda.is_available())\n",
93
+ " import sys\n",
94
+ " !{sys.executable} -m pip install opencv-python matplotlib scikit-learn\n",
95
+ " !{sys.executable} -m pip install 'git+https://github.com/facebookresearch/sam3.git'"
96
+ ]
97
+ },
98
+ {
99
+ "cell_type": "code",
100
+ "execution_count": null,
101
+ "id": "69b28288",
102
+ "metadata": {},
103
+ "outputs": [],
104
+ "source": [
105
+ "import os\n",
106
+ "# if using Apple MPS, fall back to CPU for unsupported ops\n",
107
+ "os.environ[\"PYTORCH_ENABLE_MPS_FALLBACK\"] = \"1\"\n",
108
+ "import numpy as np\n",
109
+ "import torch\n",
110
+ "import matplotlib.pyplot as plt\n",
111
+ "from PIL import Image\n",
112
+ "import sam3\n",
113
+ "sam3_root = os.path.join(os.path.dirname(sam3.__file__), \"..\")\n"
114
+ ]
115
+ },
116
+ {
117
+ "cell_type": "code",
118
+ "execution_count": null,
119
+ "id": "33a15e2f-c7e1-4e5d-862f-fcb751a60b89",
120
+ "metadata": {},
121
+ "outputs": [],
122
+ "source": [
123
+ "# select the device for computation\n",
124
+ "if torch.cuda.is_available():\n",
125
+ " device = torch.device(\"cuda\")\n",
126
+ "elif torch.backends.mps.is_available():\n",
127
+ " device = torch.device(\"mps\")\n",
128
+ "else:\n",
129
+ " device = torch.device(\"cpu\")\n",
130
+ "print(f\"using device: {device}\")\n",
131
+ "\n",
132
+ "if device.type == \"cuda\":\n",
133
+ " # use bfloat16 for the entire notebook\n",
134
+ " torch.autocast(\"cuda\", dtype=torch.bfloat16).__enter__()\n",
135
+ " # turn on tfloat32 for Ampere GPUs (https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices)\n",
136
+ " if torch.cuda.get_device_properties(0).major >= 8:\n",
137
+ " torch.backends.cuda.matmul.allow_tf32 = True\n",
138
+ " torch.backends.cudnn.allow_tf32 = True\n",
139
+ "elif device.type == \"mps\":\n",
140
+ " print(\n",
141
+ " \"\\nSupport for MPS devices is preliminary. SAM 3 is trained with CUDA and might \"\n",
142
+ " \"give numerically different outputs and sometimes degraded performance on MPS. \"\n",
143
+ " \"See e.g. https://github.com/pytorch/pytorch/issues/84936 for a discussion.\"\n",
144
+ " )"
145
+ ]
146
+ },
147
+ {
148
+ "cell_type": "code",
149
+ "execution_count": null,
150
+ "id": "29bc90d5",
151
+ "metadata": {},
152
+ "outputs": [],
153
+ "source": [
154
+ "np.random.seed(3)\n",
155
+ "\n",
156
+ "def show_mask(mask, ax, random_color=False, borders = True):\n",
157
+ " if random_color:\n",
158
+ " color = np.concatenate([np.random.random(3), np.array([0.6])], axis=0)\n",
159
+ " else:\n",
160
+ " color = np.array([30/255, 144/255, 255/255, 0.6])\n",
161
+ " h, w = mask.shape[-2:]\n",
162
+ " mask = mask.astype(np.uint8)\n",
163
+ " mask_image = mask.reshape(h, w, 1) * color.reshape(1, 1, -1)\n",
164
+ " if borders:\n",
165
+ " import cv2\n",
166
+ " contours, _ = cv2.findContours(mask,cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE) \n",
167
+ " # Try to smooth contours\n",
168
+ " contours = [cv2.approxPolyDP(contour, epsilon=0.01, closed=True) for contour in contours]\n",
169
+ " mask_image = cv2.drawContours(mask_image, contours, -1, (1, 1, 1, 0.5), thickness=2) \n",
170
+ " ax.imshow(mask_image)\n",
171
+ "\n",
172
+ "def show_points(coords, labels, ax, marker_size=375):\n",
173
+ " pos_points = coords[labels==1]\n",
174
+ " neg_points = coords[labels==0]\n",
175
+ " ax.scatter(pos_points[:, 0], pos_points[:, 1], color='green', marker='*', s=marker_size, edgecolor='white', linewidth=1.25)\n",
176
+ " ax.scatter(neg_points[:, 0], neg_points[:, 1], color='red', marker='*', s=marker_size, edgecolor='white', linewidth=1.25) \n",
177
+ "\n",
178
+ "def show_box(box, ax):\n",
179
+ " x0, y0 = box[0], box[1]\n",
180
+ " w, h = box[2] - box[0], box[3] - box[1]\n",
181
+ " ax.add_patch(plt.Rectangle((x0, y0), w, h, edgecolor='green', facecolor=(0, 0, 0, 0), lw=2)) \n",
182
+ "\n",
183
+ "def show_masks(image, masks, scores, point_coords=None, box_coords=None, input_labels=None, borders=True):\n",
184
+ " for i, (mask, score) in enumerate(zip(masks, scores)):\n",
185
+ " plt.figure(figsize=(10, 10))\n",
186
+ " plt.imshow(image)\n",
187
+ " show_mask(mask, plt.gca(), borders=borders)\n",
188
+ " if point_coords is not None:\n",
189
+ " assert input_labels is not None\n",
190
+ " show_points(point_coords, input_labels, plt.gca())\n",
191
+ " if box_coords is not None:\n",
192
+ " # boxes\n",
193
+ " show_box(box_coords, plt.gca())\n",
194
+ " if len(scores) > 1:\n",
195
+ " plt.title(f\"Mask {i+1}, Score: {score:.3f}\", fontsize=18)\n",
196
+ " plt.axis('off')\n",
197
+ " plt.show()"
198
+ ]
199
+ },
200
+ {
201
+ "cell_type": "markdown",
202
+ "id": "23842fb2",
203
+ "metadata": {},
204
+ "source": [
205
+ "## Example image"
206
+ ]
207
+ },
208
+ {
209
+ "cell_type": "code",
210
+ "execution_count": null,
211
+ "id": "3c2e4f6b",
212
+ "metadata": {},
213
+ "outputs": [],
214
+ "source": [
215
+ "image = Image.open(f\"{sam3_root}/assets/images/truck.jpg\")"
216
+ ]
217
+ },
218
+ {
219
+ "cell_type": "code",
220
+ "execution_count": null,
221
+ "id": "e30125fd",
222
+ "metadata": {},
223
+ "outputs": [],
224
+ "source": [
225
+ "plt.figure(figsize=(10, 10))\n",
226
+ "plt.imshow(image)\n",
227
+ "plt.axis('on')\n",
228
+ "plt.show()"
229
+ ]
230
+ },
231
+ {
232
+ "cell_type": "markdown",
233
+ "id": "98b228b8",
234
+ "metadata": {},
235
+ "source": [
236
+ "## Selecting objects with SAM 3"
237
+ ]
238
+ },
239
+ {
240
+ "cell_type": "markdown",
241
+ "id": "0bb1927b",
242
+ "metadata": {},
243
+ "source": [
244
+ "First, load the SAM 3 model. Running on CUDA and using the default model are recommended for best results."
245
+ ]
246
+ },
247
+ {
248
+ "cell_type": "code",
249
+ "execution_count": null,
250
+ "id": "7e28150b",
251
+ "metadata": {},
252
+ "outputs": [],
253
+ "source": [
254
+ "from sam3 import build_sam3_image_model\n",
255
+ "from sam3.model.sam3_image_processor import Sam3Processor\n",
256
+ "\n",
257
+ "bpe_path = f\"{sam3_root}/assets/bpe_simple_vocab_16e6.txt.gz\"\n",
258
+ "model = build_sam3_image_model(bpe_path=bpe_path, enable_inst_interactivity=True)\n"
259
+ ]
260
+ },
261
+ {
262
+ "cell_type": "markdown",
263
+ "id": "c925e829",
264
+ "metadata": {},
265
+ "source": [
266
+ "Process the image to produce an image embedding by calling `Sam3Processor.set_image`."
267
+ ]
268
+ },
269
+ {
270
+ "cell_type": "code",
271
+ "execution_count": null,
272
+ "id": "d95d48dd",
273
+ "metadata": {},
274
+ "outputs": [],
275
+ "source": [
276
+ "processor = Sam3Processor(model)\n",
277
+ "inference_state = processor.set_image(image)"
278
+ ]
279
+ },
280
+ {
281
+ "cell_type": "markdown",
282
+ "id": "d8fc7a46",
283
+ "metadata": {},
284
+ "source": [
285
+ "To select the truck, choose a point on it. Points are input to the model in (x,y) format and come with labels 1 (foreground point) or 0 (background point). Multiple points can be input; here we use only one. The chosen point will be shown as a star on the image."
286
+ ]
287
+ },
288
+ {
289
+ "cell_type": "code",
290
+ "execution_count": null,
291
+ "id": "5c69570c",
292
+ "metadata": {},
293
+ "outputs": [],
294
+ "source": [
295
+ "input_point = np.array([[520, 375]])\n",
296
+ "input_label = np.array([1])"
297
+ ]
298
+ },
299
+ {
300
+ "cell_type": "code",
301
+ "execution_count": null,
302
+ "id": "a91ba973",
303
+ "metadata": {},
304
+ "outputs": [],
305
+ "source": [
306
+ "plt.figure(figsize=(10, 10))\n",
307
+ "plt.imshow(image)\n",
308
+ "show_points(input_point, input_label, plt.gca())\n",
309
+ "plt.axis('on')\n",
310
+ "plt.show() "
311
+ ]
312
+ },
313
+ {
314
+ "cell_type": "markdown",
315
+ "id": "c765e952",
316
+ "metadata": {},
317
+ "source": [
318
+ "Predict with `SAM3Image.predict_inst`. The model returns masks, quality predictions for those masks, and low resolution mask logits that can be passed to the next iteration of prediction."
319
+ ]
320
+ },
321
+ {
322
+ "cell_type": "code",
323
+ "execution_count": null,
324
+ "id": "5373fd68",
325
+ "metadata": {},
326
+ "outputs": [],
327
+ "source": [
328
+ "masks, scores, logits = model.predict_inst(\n",
329
+ " inference_state,\n",
330
+ " point_coords=input_point,\n",
331
+ " point_labels=input_label,\n",
332
+ " multimask_output=True,\n",
333
+ ")\n",
334
+ "sorted_ind = np.argsort(scores)[::-1]\n",
335
+ "masks = masks[sorted_ind]\n",
336
+ "scores = scores[sorted_ind]\n",
337
+ "logits = logits[sorted_ind]"
338
+ ]
339
+ },
340
+ {
341
+ "cell_type": "markdown",
342
+ "id": "c7f0e938",
343
+ "metadata": {},
344
+ "source": [
345
+ "With `multimask_output=True` (the default setting), SAM 3 outputs 3 masks, where `scores` gives the model's own estimation of the quality of these masks. This setting is intended for ambiguous input prompts, and helps the model disambiguate different objects consistent with the prompt. When `False`, it will return a single mask. For ambiguous prompts such as a single point, it is recommended to use `multimask_output=True` even if only a single mask is desired; the best single mask can be chosen by picking the one with the highest score returned in `scores`. This will often result in a better mask."
346
+ ]
347
+ },
348
+ {
349
+ "cell_type": "code",
350
+ "execution_count": null,
351
+ "id": "47821187",
352
+ "metadata": {},
353
+ "outputs": [],
354
+ "source": [
355
+ "masks.shape # (number_of_masks) x H x W"
356
+ ]
357
+ },
358
+ {
359
+ "cell_type": "code",
360
+ "execution_count": null,
361
+ "id": "e9c227a6",
362
+ "metadata": {},
363
+ "outputs": [],
364
+ "source": [
365
+ "show_masks(image, masks, scores, point_coords=input_point, input_labels=input_label, borders=True)"
366
+ ]
367
+ },
368
+ {
369
+ "cell_type": "markdown",
370
+ "id": "3fa31f7c",
371
+ "metadata": {},
372
+ "source": [
373
+ "## Specifying a specific object with additional points"
374
+ ]
375
+ },
376
+ {
377
+ "cell_type": "markdown",
378
+ "id": "88d6d29a",
379
+ "metadata": {},
380
+ "source": [
381
+ "The single input point is ambiguous, and the model has returned multiple objects consistent with it. To obtain a single object, multiple points can be provided. If available, a mask from a previous iteration can also be supplied to the model to aid in prediction. When specifying a single object with multiple prompts, a single mask can be requested by setting `multimask_output=False`."
382
+ ]
383
+ },
384
+ {
385
+ "cell_type": "code",
386
+ "execution_count": null,
387
+ "id": "f6923b94",
388
+ "metadata": {},
389
+ "outputs": [],
390
+ "source": [
391
+ "input_point = np.array([[500, 375], [1125, 625]])\n",
392
+ "input_label = np.array([1, 1])\n",
393
+ "\n",
394
+ "mask_input = logits[np.argmax(scores), :, :] # Choose the model's best mask"
395
+ ]
396
+ },
397
+ {
398
+ "cell_type": "code",
399
+ "execution_count": null,
400
+ "id": "d98f96a1",
401
+ "metadata": {},
402
+ "outputs": [],
403
+ "source": [
404
+ "masks, scores, _ = model.predict_inst(\n",
405
+ " inference_state,\n",
406
+ " point_coords=input_point,\n",
407
+ " point_labels=input_label,\n",
408
+ " mask_input=mask_input[None, :, :],\n",
409
+ " multimask_output=False,\n",
410
+ ")"
411
+ ]
412
+ },
413
+ {
414
+ "cell_type": "code",
415
+ "execution_count": null,
416
+ "id": "0ce8b82f",
417
+ "metadata": {},
418
+ "outputs": [],
419
+ "source": [
420
+ "masks.shape"
421
+ ]
422
+ },
423
+ {
424
+ "cell_type": "code",
425
+ "execution_count": null,
426
+ "id": "e06d5c8d",
427
+ "metadata": {},
428
+ "outputs": [],
429
+ "source": [
430
+ "show_masks(image, masks, scores, point_coords=input_point, input_labels=input_label)"
431
+ ]
432
+ },
433
+ {
434
+ "cell_type": "markdown",
435
+ "id": "c93e2087",
436
+ "metadata": {},
437
+ "source": [
438
+ "To exclude the car and specify just the window, a background point (with label 0, here shown in red) can be supplied."
439
+ ]
440
+ },
441
+ {
442
+ "cell_type": "code",
443
+ "execution_count": null,
444
+ "id": "9a196f68",
445
+ "metadata": {},
446
+ "outputs": [],
447
+ "source": [
448
+ "input_point = np.array([[500, 375], [1125, 625]])\n",
449
+ "input_label = np.array([1, 0])\n",
450
+ "\n",
451
+ "mask_input = logits[np.argmax(scores), :, :] # Choose the model's best mask"
452
+ ]
453
+ },
454
+ {
455
+ "cell_type": "code",
456
+ "execution_count": null,
457
+ "id": "81a52282",
458
+ "metadata": {},
459
+ "outputs": [],
460
+ "source": [
461
+ "masks, scores, _ = model.predict_inst(\n",
462
+ " inference_state,\n",
463
+ " point_coords=input_point,\n",
464
+ " point_labels=input_label,\n",
465
+ " mask_input=mask_input[None, :, :],\n",
466
+ " multimask_output=False,\n",
467
+ ")"
468
+ ]
469
+ },
470
+ {
471
+ "cell_type": "code",
472
+ "execution_count": null,
473
+ "id": "bfca709f",
474
+ "metadata": {},
475
+ "outputs": [],
476
+ "source": [
477
+ "show_masks(image, masks, scores, point_coords=input_point, input_labels=input_label)"
478
+ ]
479
+ },
480
+ {
481
+ "cell_type": "markdown",
482
+ "id": "41e2d5a9",
483
+ "metadata": {},
484
+ "source": [
485
+ "## Specifying a specific object with a box"
486
+ ]
487
+ },
488
+ {
489
+ "cell_type": "markdown",
490
+ "id": "d61ca7ac",
491
+ "metadata": {},
492
+ "source": [
493
+ "The model can also take a box as input, provided in xyxy format."
494
+ ]
495
+ },
496
+ {
497
+ "cell_type": "code",
498
+ "execution_count": null,
499
+ "id": "8ea92a7b",
500
+ "metadata": {},
501
+ "outputs": [],
502
+ "source": [
503
+ "input_box = np.array([425, 600, 700, 875])"
504
+ ]
505
+ },
506
+ {
507
+ "cell_type": "code",
508
+ "execution_count": null,
509
+ "id": "b35a8814",
510
+ "metadata": {},
511
+ "outputs": [],
512
+ "source": [
513
+ "masks, scores, _ = model.predict_inst(\n",
514
+ " inference_state,\n",
515
+ " point_coords=None,\n",
516
+ " point_labels=None,\n",
517
+ " box=input_box[None, :],\n",
518
+ " multimask_output=False,\n",
519
+ ")"
520
+ ]
521
+ },
522
+ {
523
+ "cell_type": "code",
524
+ "execution_count": null,
525
+ "id": "3ffb4906",
526
+ "metadata": {},
527
+ "outputs": [],
528
+ "source": [
529
+ "show_masks(image, masks, scores, box_coords=input_box)"
530
+ ]
531
+ },
532
+ {
533
+ "cell_type": "markdown",
534
+ "id": "c1ed9f0a",
535
+ "metadata": {},
536
+ "source": [
537
+ "## Combining points and boxes"
538
+ ]
539
+ },
540
+ {
541
+ "cell_type": "markdown",
542
+ "id": "8455d1c5",
543
+ "metadata": {},
544
+ "source": [
545
+ "Points and boxes may be combined, just by including both types of prompts to the predictor. Here this can be used to select just the truck's tire, instead of the entire wheel."
546
+ ]
547
+ },
548
+ {
549
+ "cell_type": "code",
550
+ "execution_count": null,
551
+ "id": "90e2e547",
552
+ "metadata": {},
553
+ "outputs": [],
554
+ "source": [
555
+ "input_box = np.array([425, 600, 700, 875])\n",
556
+ "input_point = np.array([[575, 750]])\n",
557
+ "input_label = np.array([0])"
558
+ ]
559
+ },
560
+ {
561
+ "cell_type": "code",
562
+ "execution_count": null,
563
+ "id": "6956d8c4",
564
+ "metadata": {},
565
+ "outputs": [],
566
+ "source": [
567
+ "masks, scores, logits = model.predict_inst(\n",
568
+ " inference_state,\n",
569
+ " point_coords=input_point,\n",
570
+ " point_labels=input_label,\n",
571
+ " box=input_box,\n",
572
+ " multimask_output=False,\n",
573
+ ")"
574
+ ]
575
+ },
576
+ {
577
+ "cell_type": "code",
578
+ "execution_count": null,
579
+ "id": "eb519a31",
580
+ "metadata": {},
581
+ "outputs": [],
582
+ "source": [
583
+ "show_masks(image, masks, scores, box_coords=input_box, point_coords=input_point, input_labels=input_label)"
584
+ ]
585
+ },
586
+ {
587
+ "cell_type": "markdown",
588
+ "id": "45ddbca3",
589
+ "metadata": {},
590
+ "source": [
591
+ "## Batched prompt inputs"
592
+ ]
593
+ },
594
+ {
595
+ "cell_type": "markdown",
596
+ "id": "df6f18a0",
597
+ "metadata": {},
598
+ "source": [
599
+ "`SAM3Image` can take multiple input prompts for the same image, using the `predict_inst` method. For example, imagine we have several box outputs from an object detector."
600
+ ]
601
+ },
602
+ {
603
+ "cell_type": "code",
604
+ "execution_count": null,
605
+ "id": "0a06681b",
606
+ "metadata": {},
607
+ "outputs": [],
608
+ "source": [
609
+ "input_boxes = np.array([\n",
610
+ " [75, 275, 1725, 850],\n",
611
+ " [425, 600, 700, 875],\n",
612
+ " [1375, 550, 1650, 800],\n",
613
+ " [1240, 675, 1400, 750],\n",
614
+ "])"
615
+ ]
616
+ },
617
+ {
618
+ "cell_type": "code",
619
+ "execution_count": null,
620
+ "id": "117521a3",
621
+ "metadata": {},
622
+ "outputs": [],
623
+ "source": [
624
+ "masks, scores, _ = model.predict_inst(\n",
625
+ " inference_state,\n",
626
+ " point_coords=None,\n",
627
+ " point_labels=None,\n",
628
+ " box=input_boxes,\n",
629
+ " multimask_output=False,\n",
630
+ ")"
631
+ ]
632
+ },
633
+ {
634
+ "cell_type": "code",
635
+ "execution_count": null,
636
+ "id": "6a8f5d49",
637
+ "metadata": {},
638
+ "outputs": [],
639
+ "source": [
640
+ "masks.shape # (batch_size) x (num_predicted_masks_per_input) x H x W"
641
+ ]
642
+ },
643
+ {
644
+ "cell_type": "code",
645
+ "execution_count": null,
646
+ "id": "c00c3681",
647
+ "metadata": {},
648
+ "outputs": [],
649
+ "source": [
650
+ "plt.figure(figsize=(10, 10))\n",
651
+ "plt.imshow(image)\n",
652
+ "for mask in masks:\n",
653
+ " show_mask(mask.squeeze(0), plt.gca(), random_color=True)\n",
654
+ "for box in input_boxes:\n",
655
+ " show_box(box, plt.gca())\n",
656
+ "plt.axis('off')\n",
657
+ "plt.show()"
658
+ ]
659
+ },
660
+ {
661
+ "cell_type": "markdown",
662
+ "id": "b9a27b5d",
663
+ "metadata": {},
664
+ "source": [
665
+ "## End-to-end batched inference\n",
666
+ "If all prompts are available in advance, it is possible to run SAM 3 directly in an end-to-end fashion. This also allows batching over images."
667
+ ]
668
+ },
669
+ {
670
+ "cell_type": "code",
671
+ "execution_count": null,
672
+ "id": "d485f75b",
673
+ "metadata": {},
674
+ "outputs": [],
675
+ "source": [
676
+ "image1 = image # truck.jpg from above\n",
677
+ "image1_boxes = np.array([\n",
678
+ " [75, 275, 1725, 850],\n",
679
+ " [425, 600, 700, 875],\n",
680
+ " [1375, 550, 1650, 800],\n",
681
+ " [1240, 675, 1400, 750],\n",
682
+ "])\n",
683
+ "\n",
684
+ "image2 = Image.open(f\"{sam3_root}/assets/images/groceries.jpg\")\n",
685
+ "image2_boxes = np.array([\n",
686
+ " [450, 170, 520, 350],\n",
687
+ " [350, 190, 450, 350],\n",
688
+ " [500, 170, 580, 350],\n",
689
+ " [580, 170, 640, 350],\n",
690
+ "])\n",
691
+ "\n",
692
+ "img_batch = [image1, image2]\n",
693
+ "boxes_batch = [image1_boxes, image2_boxes]"
694
+ ]
695
+ },
696
+ {
697
+ "cell_type": "code",
698
+ "execution_count": null,
699
+ "id": "47932c99",
700
+ "metadata": {},
701
+ "outputs": [],
702
+ "source": [
703
+ "inference_state = processor.set_image_batch(img_batch)"
704
+ ]
705
+ },
706
+ {
707
+ "cell_type": "code",
708
+ "execution_count": null,
709
+ "id": "97af3c54",
710
+ "metadata": {},
711
+ "outputs": [],
712
+ "source": [
713
+ "masks_batch, scores_batch, _ = model.predict_inst_batch(\n",
714
+ " inference_state,\n",
715
+ " None,\n",
716
+ " None, \n",
717
+ " box_batch=boxes_batch, \n",
718
+ " multimask_output=False\n",
719
+ ")"
720
+ ]
721
+ },
722
+ {
723
+ "cell_type": "code",
724
+ "execution_count": null,
725
+ "id": "226df881",
726
+ "metadata": {},
727
+ "outputs": [],
728
+ "source": [
729
+ "for image, boxes, masks in zip(img_batch, boxes_batch, masks_batch):\n",
730
+ " plt.figure(figsize=(10, 10))\n",
731
+ " plt.imshow(image) \n",
732
+ " for mask in masks:\n",
733
+ " show_mask(mask.squeeze(0), plt.gca(), random_color=True)\n",
734
+ " for box in boxes:\n",
735
+ " show_box(box, plt.gca())"
736
+ ]
737
+ },
738
+ {
739
+ "cell_type": "markdown",
740
+ "id": "46f30085",
741
+ "metadata": {},
742
+ "source": [
743
+ "Similarly, we can have a batch of point prompts defined over a batch of images"
744
+ ]
745
+ },
746
+ {
747
+ "cell_type": "code",
748
+ "execution_count": null,
749
+ "id": "1ab929fc",
750
+ "metadata": {},
751
+ "outputs": [],
752
+ "source": [
753
+ "image1 = image # truck.jpg from above\n",
754
+ "image1_pts = np.array([\n",
755
+ " [[500, 375]],\n",
756
+ " [[650, 750]]\n",
757
+ " ]) # Bx1x2 where B corresponds to number of objects \n",
758
+ "image1_labels = np.array([[1], [1]])\n",
759
+ "\n",
760
+ "image2_pts = np.array([\n",
761
+ " [[400, 300]],\n",
762
+ " [[630, 300]],\n",
763
+ "])\n",
764
+ "image2_labels = np.array([[1], [1]])\n",
765
+ "\n",
766
+ "pts_batch = [image1_pts, image2_pts]\n",
767
+ "labels_batch = [image1_labels, image2_labels]"
768
+ ]
769
+ },
770
+ {
771
+ "cell_type": "code",
772
+ "execution_count": null,
773
+ "id": "848f8287",
774
+ "metadata": {},
775
+ "outputs": [],
776
+ "source": [
777
+ "masks_batch, scores_batch, _ = model.predict_inst_batch(inference_state, pts_batch, labels_batch, box_batch=None, multimask_output=True)\n",
778
+ "\n",
779
+ "# Select the best single mask per object\n",
780
+ "best_masks = []\n",
781
+ "for masks, scores in zip(masks_batch,scores_batch):\n",
782
+ " best_masks.append(masks[range(len(masks)), np.argmax(scores, axis=-1)])"
783
+ ]
784
+ },
785
+ {
786
+ "cell_type": "code",
787
+ "execution_count": null,
788
+ "id": "99b15c6c",
789
+ "metadata": {},
790
+ "outputs": [],
791
+ "source": [
792
+ "for image, points, labels, masks in zip(img_batch, pts_batch, labels_batch, best_masks):\n",
793
+ " plt.figure(figsize=(10, 10))\n",
794
+ " plt.imshow(image) \n",
795
+ " for mask in masks:\n",
796
+ " show_mask(mask, plt.gca(), random_color=True)\n",
797
+ " show_points(points, labels, plt.gca())"
798
+ ]
799
+ },
800
+ {
801
+ "cell_type": "code",
802
+ "execution_count": null,
803
+ "id": "4c1594a5-a0de-4477-91d4-db4504a78a83",
804
+ "metadata": {},
805
+ "outputs": [],
806
+ "source": []
807
+ },
808
+ {
809
+ "cell_type": "code",
810
+ "execution_count": null,
811
+ "id": "74e3d07e-b0de-48a5-9d29-d639a0dbcdfc",
812
+ "metadata": {},
813
+ "outputs": [],
814
+ "source": []
815
+ },
816
+ {
817
+ "cell_type": "code",
818
+ "execution_count": null,
819
+ "id": "d8b1de3a-a253-48ff-8a1c-d80742acbe86",
820
+ "metadata": {},
821
+ "outputs": [],
822
+ "source": []
823
+ }
824
+ ],
825
+ "metadata": {
826
+ "kernelspec": {
827
+ "display_name": "Python 3 (ipykernel)",
828
+ "language": "python",
829
+ "name": "python3"
830
+ },
831
+ "language_info": {
832
+ "codemirror_mode": {
833
+ "name": "ipython",
834
+ "version": 3
835
+ },
836
+ "file_extension": ".py",
837
+ "mimetype": "text/x-python",
838
+ "name": "python",
839
+ "nbconvert_exporter": "python",
840
+ "pygments_lexer": "ipython3",
841
+ "version": "3.12.11"
842
+ }
843
+ },
844
+ "nbformat": 4,
845
+ "nbformat_minor": 5
846
+ }
source_code/sam3/examples/sam3_image_interactive.ipynb ADDED
@@ -0,0 +1,757 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 1,
6
+ "id": "5d0e0b69",
7
+ "metadata": {},
8
+ "outputs": [],
9
+ "source": [
10
+ "# Copyright (c) Meta Platforms, Inc. and affiliates."
11
+ ]
12
+ },
13
+ {
14
+ "cell_type": "markdown",
15
+ "id": "11912666",
16
+ "metadata": {},
17
+ "source": [
18
+ "# <a target=\"_blank\" href=\"https://colab.research.google.com/github/facebookresearch/sam3/blob/main/notebooks/sam3_image_interactive.ipynb\">\n",
19
+ "# <img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/>\n",
20
+ "# </a>"
21
+ ]
22
+ },
23
+ {
24
+ "cell_type": "code",
25
+ "execution_count": 2,
26
+ "id": "8517f5f6",
27
+ "metadata": {},
28
+ "outputs": [],
29
+ "source": [
30
+ "using_colab = False"
31
+ ]
32
+ },
33
+ {
34
+ "cell_type": "code",
35
+ "execution_count": 3,
36
+ "id": "2540e376",
37
+ "metadata": {},
38
+ "outputs": [],
39
+ "source": [
40
+ "if using_colab:\n",
41
+ " import torch\n",
42
+ " import torchvision\n",
43
+ " print(\"PyTorch version:\", torch.__version__)\n",
44
+ " print(\"Torchvision version:\", torchvision.__version__)\n",
45
+ " print(\"CUDA is available:\", torch.cuda.is_available())\n",
46
+ " import sys\n",
47
+ " !{sys.executable} -m pip install opencv-python matplotlib scikit-learn\n",
48
+ " !{sys.executable} -m pip install 'git+https://github.com/facebookresearch/sam3.git'"
49
+ ]
50
+ },
51
+ {
52
+ "cell_type": "code",
53
+ "execution_count": 4,
54
+ "id": "90073483-58f6-404e-90ac-c22efcd76216",
55
+ "metadata": {},
56
+ "outputs": [],
57
+ "source": [
58
+ "%matplotlib widget"
59
+ ]
60
+ },
61
+ {
62
+ "cell_type": "code",
63
+ "execution_count": 5,
64
+ "id": "13325376-658b-48d6-8528-2a006f223d44",
65
+ "metadata": {},
66
+ "outputs": [],
67
+ "source": [
68
+ "import torch\n",
69
+ "# turn on tfloat32 for Ampere GPUs\n",
70
+ "# https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices\n",
71
+ "torch.backends.cuda.matmul.allow_tf32 = True\n",
72
+ "torch.backends.cudnn.allow_tf32 = True\n",
73
+ "\n",
74
+ "# use bfloat16 for the entire notebook. If your card doesn't support it, try float16 instead\n",
75
+ "torch.autocast(\"cuda\", dtype=torch.bfloat16).__enter__()\n",
76
+ "\n",
77
+ "# inference mode for the whole notebook. Disable if you need gradients\n",
78
+ "torch.inference_mode().__enter__()"
79
+ ]
80
+ },
81
+ {
82
+ "cell_type": "markdown",
83
+ "id": "fb863772-56a9-4ee2-be52-5d8933066519",
84
+ "metadata": {},
85
+ "source": [
86
+ "# Load the model"
87
+ ]
88
+ },
89
+ {
90
+ "cell_type": "code",
91
+ "execution_count": 6,
92
+ "id": "f84b4ccc-9db2-4d88-ac8f-4c272694d25a",
93
+ "metadata": {},
94
+ "outputs": [],
95
+ "source": [
96
+ "import sam3\n",
97
+ "from sam3 import build_sam3_image_model\n",
98
+ "import os\n",
99
+ "sam3_root = os.path.join(os.path.dirname(sam3.__file__), \"..\")\n",
100
+ "bpe_path = f\"{sam3_root}/assets/bpe_simple_vocab_16e6.txt.gz\""
101
+ ]
102
+ },
103
+ {
104
+ "cell_type": "code",
105
+ "execution_count": 7,
106
+ "id": "de01a36e-1221-4497-a5ab-e6c796689480",
107
+ "metadata": {},
108
+ "outputs": [],
109
+ "source": [
110
+ "model = build_sam3_image_model(bpe_path=bpe_path)"
111
+ ]
112
+ },
113
+ {
114
+ "cell_type": "code",
115
+ "execution_count": 8,
116
+ "id": "b01ec8a9-d9f6-4baf-96ac-1e5d21fd90b8",
117
+ "metadata": {},
118
+ "outputs": [],
119
+ "source": [
120
+ "from sam3.model.sam3_image_processor import Sam3Processor\n",
121
+ "processor = Sam3Processor(model)"
122
+ ]
123
+ },
124
+ {
125
+ "cell_type": "markdown",
126
+ "id": "e6172a69-35ca-487c-bd67-6f1f1ecb20d5",
127
+ "metadata": {},
128
+ "source": [
129
+ "# Jupyter widget"
130
+ ]
131
+ },
132
+ {
133
+ "cell_type": "code",
134
+ "execution_count": 9,
135
+ "id": "2a4ac22f-5d5c-4272-a5a1-dfe0c04253a7",
136
+ "metadata": {},
137
+ "outputs": [],
138
+ "source": [
139
+ "import io\n",
140
+ "\n",
141
+ "import ipywidgets as widgets\n",
142
+ "import matplotlib.pyplot as plt\n",
143
+ "import numpy as np\n",
144
+ "import PIL.Image\n",
145
+ "import requests\n",
146
+ "from IPython.display import clear_output, display, HTML\n",
147
+ "from matplotlib.patches import Rectangle\n",
148
+ "\n",
149
+ "\n",
150
+ "class Sam3SegmentationWidget:\n",
151
+ " \"\"\"Interactive Jupyter widget for SAM3 segmentation with text and box prompts.\"\"\"\n",
152
+ "\n",
153
+ " def __init__(self, processor):\n",
154
+ " \"\"\"\n",
155
+ " Initialize the segmentation widget.\n",
156
+ "\n",
157
+ " Args:\n",
158
+ " processor: Sam3Processor instance\n",
159
+ " \"\"\"\n",
160
+ " self.processor = processor\n",
161
+ " self.state = None\n",
162
+ " self.current_image = None\n",
163
+ " self.current_image_array = None\n",
164
+ " self.box_mode = \"positive\"\n",
165
+ " self.drawing_box = False\n",
166
+ " self.box_start = None\n",
167
+ " self.current_rect = None\n",
168
+ "\n",
169
+ " self._setup_ui()\n",
170
+ " self._setup_plot()\n",
171
+ "\n",
172
+ " def _setup_ui(self):\n",
173
+ " \"\"\"Set up the UI components.\"\"\"\n",
174
+ " self.upload_widget = widgets.FileUpload(\n",
175
+ " accept=\"image/*\", multiple=False, description=\"Upload Image\"\n",
176
+ " )\n",
177
+ " self.upload_widget.observe(self._on_image_upload, names=\"value\")\n",
178
+ "\n",
179
+ " self.url_input = widgets.Text(\n",
180
+ " placeholder=\"Or enter image URL\",\n",
181
+ " )\n",
182
+ " self.url_button = widgets.Button(description=\"Load URL\", button_style=\"info\")\n",
183
+ " self.url_button.on_click(self._on_load_url)\n",
184
+ " url_box = widgets.HBox(\n",
185
+ " [self.url_input, self.url_button],\n",
186
+ " layout=widgets.Layout(width=\"100%\", justify_content=\"space-between\"),\n",
187
+ " )\n",
188
+ "\n",
189
+ " self.text_input = widgets.Text(\n",
190
+ " placeholder='Enter segmentation prompt (e.g., \"person\", \"dog\")',\n",
191
+ " continuous_update=False,\n",
192
+ " )\n",
193
+ " self.text_input.observe(self._on_text_submit, names=\"value\")\n",
194
+ " self.text_button = widgets.Button(description=\"Segment\", button_style=\"success\")\n",
195
+ " self.text_button.on_click(self._on_text_prompt)\n",
196
+ " text_box = widgets.HBox(\n",
197
+ " [self.text_input, self.text_button],\n",
198
+ " layout=widgets.Layout(width=\"100%\", justify_content=\"space-between\"),\n",
199
+ " )\n",
200
+ "\n",
201
+ " self.box_mode_buttons = widgets.ToggleButtons(\n",
202
+ " options=[\"Positive Boxes\", \"Negative Boxes\"],\n",
203
+ " description=\"Box Mode:\",\n",
204
+ " button_style=\"\",\n",
205
+ " tooltips=[\n",
206
+ " \"Draw boxes around objects to include\",\n",
207
+ " \"Draw boxes around objects to exclude\",\n",
208
+ " ],\n",
209
+ " )\n",
210
+ " self.box_mode_buttons.observe(self._on_box_mode_change, names=\"value\")\n",
211
+ "\n",
212
+ " self.clear_button = widgets.Button(\n",
213
+ " description=\"Clear All Prompts\", button_style=\"warning\"\n",
214
+ " )\n",
215
+ " self.clear_button.on_click(self._on_clear_prompts)\n",
216
+ "\n",
217
+ " self.confidence_slider = widgets.FloatSlider(\n",
218
+ " value=0.5,\n",
219
+ " min=0.0,\n",
220
+ " max=1.0,\n",
221
+ " step=0.01,\n",
222
+ " description=\"Confidence:\",\n",
223
+ " continuous_update=False,\n",
224
+ " style={\"description_width\": \"initial\"},\n",
225
+ " )\n",
226
+ " self.confidence_slider.observe(self._on_confidence_change, names=\"value\")\n",
227
+ "\n",
228
+ " self.size_slider = widgets.IntSlider(\n",
229
+ " value=960,\n",
230
+ " min=300,\n",
231
+ " max=2000,\n",
232
+ " step=10,\n",
233
+ " description=\"Image Size:\",\n",
234
+ " continuous_update=False,\n",
235
+ " style={\"description_width\": \"initial\"},\n",
236
+ " )\n",
237
+ " self.size_slider.observe(self._on_size_change, names=\"value\")\n",
238
+ "\n",
239
+ " slider_box = widgets.HBox(\n",
240
+ " [self.confidence_slider, self.size_slider],\n",
241
+ " layout=widgets.Layout(justify_content=\"space-between\"),\n",
242
+ " )\n",
243
+ "\n",
244
+ " self.output = widgets.Output()\n",
245
+ " self.status_label = widgets.Label(value=\"Upload an image to begin\")\n",
246
+ "\n",
247
+ " # This box will hold our matplotlib output and we can target it with CSS.\n",
248
+ " self.plot_container = widgets.Box([self.output])\n",
249
+ " self.plot_container.add_class(\"no-drag\")\n",
250
+ "\n",
251
+ " # CSS to make the cursor a crosshair over the matplotlib canvas\n",
252
+ " css_style = widgets.HTML(\n",
253
+ " \"\"\"\n",
254
+ " <style>\n",
255
+ " .jupyter-matplotlib-canvas, canvas {\n",
256
+ " cursor: crosshair !important;\n",
257
+ " }\n",
258
+ " </style>\n",
259
+ " \"\"\"\n",
260
+ " )\n",
261
+ " # Create VBoxes for each accordion pane\n",
262
+ " source_pane = widgets.VBox([self.upload_widget, url_box])\n",
263
+ " prompt_pane = widgets.VBox(\n",
264
+ " [\n",
265
+ " widgets.Label(\"Text Prompt:\"),\n",
266
+ " text_box,\n",
267
+ " self.box_mode_buttons,\n",
268
+ " self.confidence_slider,\n",
269
+ " self.clear_button,\n",
270
+ " ]\n",
271
+ " )\n",
272
+ " display_pane = widgets.VBox([self.size_slider])\n",
273
+ "\n",
274
+ " # Create the Accordion to hold the control panes\n",
275
+ " self.accordion = widgets.Accordion(\n",
276
+ " children=[source_pane, prompt_pane, display_pane]\n",
277
+ " )\n",
278
+ " self.accordion.set_title(0, \"Image Source\")\n",
279
+ " self.accordion.set_title(1, \"Segmentation Prompts\")\n",
280
+ " self.accordion.set_title(2, \"Display Settings\")\n",
281
+ " self.accordion.selected_index = 0 # Start with the first pane open\n",
282
+ "\n",
283
+ " # Create the left sidebar for controls\n",
284
+ " sidebar = widgets.VBox(\n",
285
+ " [self.status_label, widgets.HTML(\"<h4>Controls</h4>\"), self.accordion]\n",
286
+ " )\n",
287
+ " sidebar.layout = widgets.Layout(\n",
288
+ " width=\"380px\",\n",
289
+ " min_width=\"380px\",\n",
290
+ " max_width=\"380px\",\n",
291
+ " border=\"1px solid #e0e0e0\",\n",
292
+ " padding=\"10px\",\n",
293
+ " margin=\"0 15px 0 0\",\n",
294
+ " flex=\"0 0 auto\",\n",
295
+ " )\n",
296
+ "\n",
297
+ " # Create the main area for the image display\n",
298
+ " main_area = widgets.VBox([self.plot_container])\n",
299
+ " main_area.layout = widgets.Layout(flex=\"1\", min_width=\"500px\", overflow=\"auto\")\n",
300
+ "\n",
301
+ " # Combine sidebar and main area into the final app layout\n",
302
+ " app_layout = widgets.HBox([sidebar, main_area])\n",
303
+ " app_layout.layout = widgets.Layout(\n",
304
+ " width=\"100%\",\n",
305
+ " display=\"flex\",\n",
306
+ " flex_flow=\"row\",\n",
307
+ " align_items=\"stretch\",\n",
308
+ " )\n",
309
+ "\n",
310
+ " # Set the main container\n",
311
+ " self.container = widgets.VBox(\n",
312
+ " [\n",
313
+ " css_style,\n",
314
+ " widgets.HTML(\"<h3>🖼️ SAM3 Interactive Segmentation</h3>\"),\n",
315
+ " app_layout,\n",
316
+ " ]\n",
317
+ " )\n",
318
+ "\n",
319
+ " def _setup_plot(self):\n",
320
+ " \"\"\"Set up the matplotlib figure.\"\"\"\n",
321
+ " # plt.ioff()\n",
322
+ " self.fig, self.ax = plt.subplots(figsize=(12, 8))\n",
323
+ " # plt.ion()\n",
324
+ " self.ax.axis(\"off\")\n",
325
+ " self.fig.subplots_adjust(left=0, right=1, top=1, bottom=0)\n",
326
+ " self.fig.canvas.toolbar_visible = False\n",
327
+ " self.fig.canvas.header_visible = False\n",
328
+ " self.fig.canvas.footer_visible = False\n",
329
+ " self.fig.canvas.resizable = False\n",
330
+ "\n",
331
+ " # plt.close(self.fig)\n",
332
+ "\n",
333
+ " def _set_loading(self, is_loading, message=\"Processing...\"):\n",
334
+ " \"\"\"Show/hide loading state and disable/enable controls.\"\"\"\n",
335
+ " if is_loading:\n",
336
+ " self.status_label.value = f\"⏳ {message}\"\n",
337
+ " self.upload_widget.disabled = True\n",
338
+ " self.url_button.disabled = True\n",
339
+ " self.text_button.disabled = True\n",
340
+ " self.clear_button.disabled = True\n",
341
+ " self.box_mode_buttons.disabled = True\n",
342
+ " self.confidence_slider.disabled = True\n",
343
+ " else:\n",
344
+ " self.upload_widget.disabled = False\n",
345
+ " self.url_button.disabled = False\n",
346
+ " self.text_button.disabled = False\n",
347
+ " self.clear_button.disabled = False\n",
348
+ " self.box_mode_buttons.disabled = False\n",
349
+ " self.confidence_slider.disabled = False\n",
350
+ "\n",
351
+ " def _on_image_upload(self, change):\n",
352
+ " \"\"\"Handle image upload.\"\"\"\n",
353
+ " if change[\"new\"]:\n",
354
+ " uploaded_file = change[\"new\"][0]\n",
355
+ " image = PIL.Image.open(io.BytesIO(uploaded_file[\"content\"])).convert(\"RGB\")\n",
356
+ " self._set_image(image)\n",
357
+ "\n",
358
+ " def _on_load_url(self, button):\n",
359
+ " \"\"\"Handle loading image from URL.\"\"\"\n",
360
+ " url = self.url_input.value.strip()\n",
361
+ " if not url:\n",
362
+ " self.status_label.value = \"Please enter a URL\"\n",
363
+ " return\n",
364
+ "\n",
365
+ " self._set_loading(True, \"Downloading image from URL...\")\n",
366
+ "\n",
367
+ " try:\n",
368
+ " response = requests.get(url, timeout=10)\n",
369
+ " response.raise_for_status()\n",
370
+ " image = PIL.Image.open(io.BytesIO(response.content)).convert(\"RGB\")\n",
371
+ " self._set_image(image)\n",
372
+ " except Exception as e:\n",
373
+ " self._set_loading(False)\n",
374
+ " self.status_label.value = f\"Error loading image: {str(e)}\"\n",
375
+ "\n",
376
+ " def _set_image(self, image):\n",
377
+ " \"\"\"Set the current image, adjust figure size, and initialize state.\"\"\"\n",
378
+ " self._set_loading(True, \"Processing image through model...\")\n",
379
+ "\n",
380
+ " try:\n",
381
+ "\n",
382
+ " self.current_image = image\n",
383
+ " self.current_image_array = np.array(image)\n",
384
+ " self.state = self.processor.set_image(image)\n",
385
+ " self._set_loading(False)\n",
386
+ " self.status_label.value = (\n",
387
+ " f\"Image loaded: {image.size[0]}x{image.size[1]} pixels\"\n",
388
+ " )\n",
389
+ " self._resize_figure()\n",
390
+ " self._update_display()\n",
391
+ " self._connect_plot_events()\n",
392
+ " self.accordion.selected_index = 1\n",
393
+ " except Exception as e:\n",
394
+ " self._set_loading(False)\n",
395
+ " self.status_label.value = f\"Error processing image: {str(e)}\"\n",
396
+ "\n",
397
+ " def _on_text_submit(self, change):\n",
398
+ " \"\"\"Handle text prompt submission via Enter key.\"\"\"\n",
399
+ " # Call the same handler as the button click\n",
400
+ " self._on_text_prompt(None)\n",
401
+ "\n",
402
+ " def _on_text_prompt(self, button):\n",
403
+ " \"\"\"Handle text prompt submission.\"\"\"\n",
404
+ " if self.state is None:\n",
405
+ " self.status_label.value = \"Please load an image first\"\n",
406
+ " return\n",
407
+ "\n",
408
+ " prompt = self.text_input.value.strip()\n",
409
+ " if not prompt:\n",
410
+ " self.status_label.value = \"Please enter a prompt\"\n",
411
+ " return\n",
412
+ "\n",
413
+ " self._set_loading(True, f'Segmenting with prompt: \"{prompt}\"...')\n",
414
+ "\n",
415
+ " try:\n",
416
+ " self.state = self.processor.set_text_prompt(prompt, self.state)\n",
417
+ " self._set_loading(False)\n",
418
+ " self.status_label.value = f'Segmented with prompt: \"{prompt}\"'\n",
419
+ " self._update_display()\n",
420
+ " except Exception as e:\n",
421
+ " self._set_loading(False)\n",
422
+ " self.status_label.value = f\"Error: {str(e)}\"\n",
423
+ "\n",
424
+ " def _on_box_mode_change(self, change):\n",
425
+ " \"\"\"Handle box mode toggle.\"\"\"\n",
426
+ " self.box_mode = \"positive\" if change[\"new\"] == \"Positive Boxes\" else \"negative\"\n",
427
+ "\n",
428
+ " def _on_clear_prompts(self, button):\n",
429
+ " \"\"\"Clear all prompts and reset to image only.\"\"\"\n",
430
+ " if self.current_image is not None:\n",
431
+ " try:\n",
432
+ " self._set_loading(True, \"Clearing prompts and resetting...\")\n",
433
+ " self.state = self.processor.reset_all_prompts(self.state)\n",
434
+ " if \"prompted_boxes\" in self.state:\n",
435
+ " del self.state[\"prompted_boxes\"]\n",
436
+ " self.text_input.value = \"\"\n",
437
+ " self._set_loading(False)\n",
438
+ " self.status_label.value = \"Cleared all prompts\"\n",
439
+ " self._update_display()\n",
440
+ " except Exception as e:\n",
441
+ " self._set_loading(False)\n",
442
+ " import traceback\n",
443
+ "\n",
444
+ " self.status_label.value = f\"Error: {str(e)} {traceback.format_exc()}\"\n",
445
+ "\n",
446
+ " def _on_confidence_change(self, change):\n",
447
+ " \"\"\"Handle confidence threshold change.\"\"\"\n",
448
+ " if self.state is not None:\n",
449
+ " self.state = self.processor.set_confidence_threshold(\n",
450
+ " change[\"new\"], self.state\n",
451
+ " )\n",
452
+ " self._update_display()\n",
453
+ "\n",
454
+ " def _connect_plot_events(self):\n",
455
+ " \"\"\"Connect matplotlib event handlers for box drawing.\"\"\"\n",
456
+ " # Disable matplotlib's toolbar navigation to allow custom box drawing\n",
457
+ " if hasattr(self.fig.canvas, \"toolbar\") and self.fig.canvas.toolbar is not None:\n",
458
+ " self.fig.canvas.toolbar.pan()\n",
459
+ " self.fig.canvas.toolbar.pan()\n",
460
+ "\n",
461
+ " self.fig.canvas.mpl_connect(\"button_press_event\", self._on_press)\n",
462
+ " self.fig.canvas.mpl_connect(\"button_release_event\", self._on_release)\n",
463
+ " self.fig.canvas.mpl_connect(\"motion_notify_event\", self._on_motion)\n",
464
+ "\n",
465
+ " def _on_press(self, event):\n",
466
+ " \"\"\"Handle mouse press for box drawing.\"\"\"\n",
467
+ " if event.inaxes != self.ax:\n",
468
+ " return\n",
469
+ " self.drawing_box = True\n",
470
+ " self.box_start = (event.xdata, event.ydata)\n",
471
+ "\n",
472
+ " def _on_motion(self, event):\n",
473
+ " \"\"\"Handle mouse motion for box preview.\"\"\"\n",
474
+ " if not self.drawing_box or event.inaxes != self.ax or self.box_start is None:\n",
475
+ " return\n",
476
+ "\n",
477
+ " if self.current_rect is not None:\n",
478
+ " self.current_rect.remove()\n",
479
+ "\n",
480
+ " x0, y0 = self.box_start\n",
481
+ " x1, y1 = event.xdata, event.ydata\n",
482
+ " width = x1 - x0\n",
483
+ " height = y1 - y0\n",
484
+ "\n",
485
+ " color = \"green\" if self.box_mode == \"positive\" else \"red\"\n",
486
+ " self.current_rect = Rectangle(\n",
487
+ " (x0, y0),\n",
488
+ " width,\n",
489
+ " height,\n",
490
+ " fill=False,\n",
491
+ " edgecolor=color,\n",
492
+ " linewidth=2,\n",
493
+ " linestyle=\"--\",\n",
494
+ " )\n",
495
+ " self.ax.add_patch(self.current_rect)\n",
496
+ " self.fig.canvas.draw_idle()\n",
497
+ "\n",
498
+ " def _on_release(self, event):\n",
499
+ " \"\"\"Handle mouse release to finalize box.\"\"\"\n",
500
+ " if not self.drawing_box or event.inaxes != self.ax or self.box_start is None:\n",
501
+ " self.drawing_box = False\n",
502
+ " return\n",
503
+ "\n",
504
+ " self.drawing_box = False\n",
505
+ "\n",
506
+ " if self.current_rect is not None:\n",
507
+ " self.current_rect.remove()\n",
508
+ " self.current_rect = None\n",
509
+ "\n",
510
+ " if self.state is None:\n",
511
+ " return\n",
512
+ "\n",
513
+ " x0, y0 = self.box_start\n",
514
+ " x1, y1 = event.xdata, event.ydata\n",
515
+ "\n",
516
+ " x_min = min(x0, x1)\n",
517
+ " x_max = max(x0, x1)\n",
518
+ " y_min = min(y0, y1)\n",
519
+ " y_max = max(y0, y1)\n",
520
+ "\n",
521
+ " if abs(x_max - x_min) < 5 or abs(y_max - y_min) < 5:\n",
522
+ " return\n",
523
+ "\n",
524
+ " # Get image dimensions\n",
525
+ " img_h = self.state[\"original_height\"]\n",
526
+ " img_w = self.state[\"original_width\"]\n",
527
+ "\n",
528
+ " # Convert from xyxy pixel coordinates to cxcywh normalized format\n",
529
+ " center_x = (x_min + x_max) / 2.0 / img_w\n",
530
+ " center_y = (y_min + y_max) / 2.0 / img_h\n",
531
+ " width = (x_max - x_min) / img_w\n",
532
+ " height = (y_max - y_min) / img_h\n",
533
+ "\n",
534
+ " box = [center_x, center_y, width, height]\n",
535
+ " label = self.box_mode == \"positive\"\n",
536
+ " mode_str = \"positive\" if label else \"negative\"\n",
537
+ "\n",
538
+ " # Store the prompted box in pixel coordinates for display\n",
539
+ " if \"prompted_boxes\" not in self.state:\n",
540
+ " self.state[\"prompted_boxes\"] = []\n",
541
+ " self.state[\"prompted_boxes\"].append(\n",
542
+ " {\"box\": [x_min, y_min, x_max, y_max], \"label\": label}\n",
543
+ " )\n",
544
+ "\n",
545
+ " self._set_loading(True, f\"Adding {mode_str} box and re-segmenting...\")\n",
546
+ "\n",
547
+ " try:\n",
548
+ " self.state = self.processor.add_geometric_prompt(box, label, self.state)\n",
549
+ " self._set_loading(False)\n",
550
+ " self.status_label.value = f\"Added {mode_str} box\"\n",
551
+ " self._update_display()\n",
552
+ " except Exception as e:\n",
553
+ " self._set_loading(False)\n",
554
+ " self.status_label.value = f\"Error adding box: {str(e)}\"\n",
555
+ "\n",
556
+ " def _resize_figure(self):\n",
557
+ " \"\"\"Calculate and apply new figure size based on image and slider value.\"\"\"\n",
558
+ " if self.current_image is None:\n",
559
+ " return\n",
560
+ "\n",
561
+ " # 1. Get original image dimensions\n",
562
+ " img_w, img_h = self.current_image.size\n",
563
+ "\n",
564
+ " # 2. The slider's value is now the direct target width for the display\n",
565
+ " display_w = float(self.size_slider.value)\n",
566
+ "\n",
567
+ " # 3. Calculate the corresponding height to maintain the original aspect ratio\n",
568
+ " aspect_ratio = img_h / img_w\n",
569
+ " display_h = int(display_w * aspect_ratio)\n",
570
+ "\n",
571
+ " # 4. Convert pixel dimensions to inches for Matplotlib and apply\n",
572
+ " dpi = self.fig.dpi\n",
573
+ " new_figsize = (display_w / dpi, display_h / dpi)\n",
574
+ " self.fig.set_size_inches(new_figsize, forward=True)\n",
575
+ "\n",
576
+ " def _on_size_change(self, change):\n",
577
+ " \"\"\"Handle a change from the image size slider.\"\"\"\n",
578
+ " if self.current_image is not None:\n",
579
+ " self._resize_figure()\n",
580
+ " # After resizing the canvas, we must redraw the content\n",
581
+ " self._update_display()\n",
582
+ "\n",
583
+ " def _update_display(self):\n",
584
+ " \"\"\"Update the display with current results.\"\"\"\n",
585
+ " if self.current_image_array is None:\n",
586
+ " return\n",
587
+ "\n",
588
+ " with self.output:\n",
589
+ " clear_output(wait=True)\n",
590
+ "\n",
591
+ " self.ax.clear()\n",
592
+ " self.ax.axis(\"off\")\n",
593
+ " self.ax.imshow(self.current_image_array)\n",
594
+ "\n",
595
+ " if self.state is not None and \"masks\" in self.state:\n",
596
+ " masks = self.state.get(\"masks\", [])\n",
597
+ " boxes = self.state.get(\"boxes\", [])\n",
598
+ " scores = self.state.get(\"scores\", [])\n",
599
+ "\n",
600
+ " if len(masks) > 0:\n",
601
+ " mask_overlay = np.zeros((*self.current_image_array.shape[:2], 4))\n",
602
+ "\n",
603
+ " for i, (mask, box, score) in enumerate(zip(masks, boxes, scores)):\n",
604
+ " mask_np = mask[0].cpu().numpy()\n",
605
+ "\n",
606
+ " color = plt.cm.tab10(i % 10)[:3]\n",
607
+ " mask_overlay[mask_np > 0.5] = (*color, 0.5)\n",
608
+ "\n",
609
+ " x0, y0, x1, y1 = box.cpu().numpy()\n",
610
+ " rect = Rectangle(\n",
611
+ " (x0, y0),\n",
612
+ " x1 - x0,\n",
613
+ " y1 - y0,\n",
614
+ " fill=False,\n",
615
+ " edgecolor=color,\n",
616
+ " linewidth=2,\n",
617
+ " )\n",
618
+ " self.ax.add_patch(rect)\n",
619
+ "\n",
620
+ " self.ax.text(\n",
621
+ " x0,\n",
622
+ " y0 - 5,\n",
623
+ " f\"{score:.2f}\",\n",
624
+ " color=\"white\",\n",
625
+ " fontsize=10,\n",
626
+ " bbox=dict(\n",
627
+ " facecolor=color, alpha=0.7, edgecolor=\"none\", pad=2\n",
628
+ " ),\n",
629
+ " )\n",
630
+ "\n",
631
+ " self.ax.imshow(mask_overlay)\n",
632
+ " self.status_label.value = f\"Found {len(masks)} object(s)\"\n",
633
+ " else:\n",
634
+ " self.status_label.value = (\n",
635
+ " \"No objects found above confidence threshold\"\n",
636
+ " )\n",
637
+ "\n",
638
+ " # Display prompted boxes with dashed lines\n",
639
+ " if self.state is not None and \"prompted_boxes\" in self.state:\n",
640
+ " for prompted_box in self.state[\"prompted_boxes\"]:\n",
641
+ " box_coords = prompted_box[\"box\"]\n",
642
+ " is_positive = prompted_box[\"label\"]\n",
643
+ "\n",
644
+ " x0, y0, x1, y1 = box_coords\n",
645
+ " color = \"green\" if is_positive else \"red\"\n",
646
+ "\n",
647
+ " rect = Rectangle(\n",
648
+ " (x0, y0),\n",
649
+ " x1 - x0,\n",
650
+ " y1 - y0,\n",
651
+ " fill=False,\n",
652
+ " edgecolor=color,\n",
653
+ " linewidth=2,\n",
654
+ " linestyle=\"--\",\n",
655
+ " )\n",
656
+ " self.ax.add_patch(rect)\n",
657
+ "\n",
658
+ " # display(self.fig.canvas)\n",
659
+ "\n",
660
+ " def display(self):\n",
661
+ " display(self.container)\n",
662
+ "\n",
663
+ " # Add this for more convenient display in notebooks\n",
664
+ " def _ipython_display_(self):\n",
665
+ " self.display()\n"
666
+ ]
667
+ },
668
+ {
669
+ "cell_type": "markdown",
670
+ "id": "1b9bda74-b455-4957-9767-2a46a041b50f",
671
+ "metadata": {},
672
+ "source": [
673
+ "# Run!"
674
+ ]
675
+ },
676
+ {
677
+ "cell_type": "code",
678
+ "execution_count": 10,
679
+ "id": "ebfb9b85-2318-4328-bb0e-e93e4a57fefe",
680
+ "metadata": {},
681
+ "outputs": [
682
+ {
683
+ "data": {
684
+ "application/vnd.jupyter.widget-view+json": {
685
+ "model_id": "ea0e04a1bfd7486b93baae650d87e0b2",
686
+ "version_major": 2,
687
+ "version_minor": 0
688
+ },
689
+ "text/plain": [
690
+ "VBox(children=(HTML(value='\\n <style>\\n .jupyter-matplotlib-canvas, canvas {\\n …"
691
+ ]
692
+ },
693
+ "metadata": {},
694
+ "output_type": "display_data"
695
+ },
696
+ {
697
+ "data": {
698
+ "application/vnd.jupyter.widget-view+json": {
699
+ "model_id": "bbdcb3374c29461bb379d4bf9c319a49",
700
+ "version_major": 2,
701
+ "version_minor": 0
702
+ },
703
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAABLAAAAMgCAYAAAAz4JsCAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjYsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvq6yFwwAAAAlwSFlzAAAPYQAAD2EBqD+naQAAFf1JREFUeJzt2DEBACAMwDDAv+fhgJceiYLe3TMzCwAAAACizu8AAAAAAHgxsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOw
AAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAA
AABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAA
SDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEi710EKPFzQ9BwAAAAASUVORK5CYII=",
704
+ "text/html": [
705
+ "\n",
706
+ " <div style=\"display: inline-block;\">\n",
707
+ " <div class=\"jupyter-widgets widget-label\" style=\"text-align: center;\">\n",
708
+ " Figure\n",
709
+ " </div>\n",
710
+ " <img src='data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAABLAAAAMgCAYAAAAz4JsCAAAAOnRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjEwLjYsIGh0dHBzOi8vbWF0cGxvdGxpYi5vcmcvq6yFwwAAAAlwSFlzAAAPYQAAD2EBqD+naQAAFf1JREFUeJzt2DEBACAMwDDAv+fhgJceiYLe3TMzCwAAAACizu8AAAAAAHgxsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgz
sAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AA
AAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAA
AEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEgzsAAAAABIM7AAAAAASDOwAAAAAEi710EKPFzQ9BwAAAAASUVORK5CYII=' width=1200.0/>\n",
711
+ " </div>\n",
712
+ " "
713
+ ],
714
+ "text/plain": [
715
+ "Canvas(footer_visible=False, header_visible=False, resizable=False, toolbar=Toolbar(toolitems=[('Home', 'Reset…"
716
+ ]
717
+ },
718
+ "metadata": {},
719
+ "output_type": "display_data"
720
+ }
721
+ ],
722
+ "source": [
723
+ "widget = Sam3SegmentationWidget(processor)\n",
724
+ "widget.display()"
725
+ ]
726
+ },
727
+ {
728
+ "cell_type": "code",
729
+ "execution_count": null,
730
+ "id": "50a14560-573a-4784-9f55-689fda9147be",
731
+ "metadata": {},
732
+ "outputs": [],
733
+ "source": []
734
+ }
735
+ ],
736
+ "metadata": {
737
+ "kernelspec": {
738
+ "display_name": "Python 3 (ipykernel)",
739
+ "language": "python",
740
+ "name": "python3"
741
+ },
742
+ "language_info": {
743
+ "codemirror_mode": {
744
+ "name": "ipython",
745
+ "version": 3
746
+ },
747
+ "file_extension": ".py",
748
+ "mimetype": "text/x-python",
749
+ "name": "python",
750
+ "nbconvert_exporter": "python",
751
+ "pygments_lexer": "ipython3",
752
+ "version": "3.12.11"
753
+ }
754
+ },
755
+ "nbformat": 4,
756
+ "nbformat_minor": 5
757
+ }
source_code/sam3/examples/sam3_image_predictor_example.ipynb ADDED
The diff for this file is too large to render. See raw diff
 
source_code/sam3/medsam3_brats/train_sam3_video_lora_ddp.py ADDED
@@ -0,0 +1,689 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ SAM3 (video model detector) + LoRA finetuning for BraTS, with optional torchrun DDP.
4
+
5
+ This script treats a 3D volume as a short video clip (slice sequence) and trains:
6
+ - LoRA adapters injected into SAM3 detector (backbone/transformer attention proj layers)
7
+ - a lightweight decoder head to predict a binary tumor mask per frame
8
+
9
+ It is designed to be compatible with inference in `infer_brats_sam3.py` by saving LoRA
10
+ weights **on the SAM3 detector module** (so keys match `sam3_video_model.detector`).
11
+ """
12
+
13
+ import argparse
14
+ import json
15
+ import os
16
+ import sys
17
+ from datetime import datetime
18
+ from pathlib import Path
19
+ from typing import Dict, Optional, Tuple
20
+
21
+ import numpy as np
22
+ import torch
23
+ import torch.distributed as dist
24
+ import torch.nn as nn
25
+ import torch.nn.functional as F
26
+ from torch.amp import autocast
27
+ from torch.nn.parallel import DistributedDataParallel as DDP
28
+ from torch.utils.data import DataLoader
29
+ from torch.utils.data.distributed import DistributedSampler
30
+ from torch.utils.tensorboard import SummaryWriter
31
+ from tqdm import tqdm
32
+
33
+ # Add SAM3 repo to import path
34
+ sys.path.insert(0, "/root/githubs/sam3")
35
+
36
+ from brats_dataset import BraTSImageDataset, BraTSVideoDataset, collate_fn_brats
37
+ from lora import apply_lora_to_model, count_parameters, load_lora_weights, save_lora_weights
38
+
39
+
40
+ def _distributed_info():
41
+ if "RANK" in os.environ and "WORLD_SIZE" in os.environ:
42
+ rank = int(os.environ["RANK"])
43
+ world_size = int(os.environ["WORLD_SIZE"])
44
+ local_rank = int(os.environ.get("LOCAL_RANK", "0"))
45
+ return True, rank, world_size, local_rank
46
+ return False, 0, 1, 0
47
+
48
+
49
def setup_distributed() -> Tuple[bool, int, int, int]:
    """Initialize the NCCL process group when launched under torchrun.

    Returns:
        Tuple[bool, int, int, int]: (distributed, rank, world_size, local_rank).
        When the torchrun environment variables are absent, returns
        (False, 0, 1, 0) without initializing anything.
    """
    distributed, rank, world_size, local_rank = _distributed_info()
    if not distributed:
        return False, 0, 1, 0
    # Bind this process to its GPU before any collective is issued.
    torch.cuda.set_device(local_rank)
    # Newer PyTorch versions warn that NCCL may hang if the rank->GPU mapping is unknown.
    # Pass device_id when supported, and use explicit device_ids in barrier to avoid ambiguity.
    try:
        dist.init_process_group(
            backend="nccl",
            init_method="env://",
            device_id=torch.device(f"cuda:{local_rank}"),
        )
    except TypeError:
        # Older PyTorch doesn't support device_id
        dist.init_process_group(backend="nccl", init_method="env://")

    # Sync all ranks once at startup; device_ids keeps NCCL unambiguous on newer PyTorch.
    try:
        dist.barrier(device_ids=[local_rank])
    except TypeError:
        dist.barrier()
    return True, rank, world_size, local_rank
71
+
72
+
73
def cleanup_distributed():
    """Tear down the process group, syncing all ranks first on a best-effort basis."""
    if not (dist.is_available() and dist.is_initialized()):
        return
    # Best-effort sync before destroy; avoid hanging if device mapping is ambiguous.
    try:
        dist.barrier(device_ids=[int(os.environ.get("LOCAL_RANK", "0"))])
    except Exception:
        try:
            dist.barrier()
        except Exception:
            pass
    dist.destroy_process_group()
85
+
86
+
87
def is_main_process(rank: int) -> bool:
    """True only for the coordinating process (global rank 0)."""
    return not rank
89
+
90
+
91
def setup_device(local_rank: int = 0) -> torch.device:
    """Bind this process to cuda:<local_rank> (enabling TF32) if CUDA exists, else CPU."""
    if not torch.cuda.is_available():
        return torch.device("cpu")
    torch.cuda.set_device(local_rank)
    # TF32 for A100+
    torch.backends.cuda.matmul.allow_tf32 = True
    torch.backends.cudnn.allow_tf32 = True
    return torch.device(f"cuda:{local_rank}")
100
+
101
+
102
def multiclass_dice_loss(pred: torch.Tensor, target: torch.Tensor, num_classes: int = 4, smooth: float = 1.0) -> torch.Tensor:
    """Soft Dice loss averaged over the tumor classes (background class 0 is skipped).

    Args:
        pred: (B, num_classes, H, W) raw logits.
        target: (B, H, W) integer class indices.
        num_classes: total number of classes including background.
        smooth: additive smoothing to avoid division by zero.

    Returns:
        Scalar tensor: mean of (1 - dice) over classes 1..num_classes-1.
    """
    probs = F.softmax(pred, dim=1)  # (B, C, H, W)
    onehot = F.one_hot(target.long(), num_classes).permute(0, 3, 1, 2).float()  # (B, C, H, W)

    def one_minus_dice(cls: int) -> torch.Tensor:
        p = probs[:, cls].reshape(-1)
        t = onehot[:, cls].reshape(-1)
        overlap = (p * t).sum()
        total = p.sum() + t.sum()
        return 1.0 - (2.0 * overlap + smooth) / (total + smooth)

    # Tumor classes only: the background channel is excluded from the loss.
    return torch.stack([one_minus_dice(c) for c in range(1, num_classes)]).mean()
122
+
123
+
124
def multiclass_ce_loss(pred: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
    """Mean cross-entropy over all pixels.

    Args:
        pred: (B, num_classes, H, W) raw logits.
        target: (B, H, W) integer class indices.
    """
    target_idx = target.long()
    return F.cross_entropy(pred, target_idx, reduction="mean")
131
+
132
+
133
def combined_loss(pred: torch.Tensor, target: torch.Tensor, num_classes: int = 4) -> torch.Tensor:
    """Equal-weight combination of soft Dice loss and cross-entropy."""
    dice = multiclass_dice_loss(pred, target, num_classes)
    ce = multiclass_ce_loss(pred, target)
    return 0.5 * dice + 0.5 * ce
136
+
137
+
138
def compute_dice(pred: torch.Tensor, target: torch.Tensor, num_classes: int = 4) -> float:
    """Mean hard-Dice over the tumor classes (background class 0 excluded).

    Args:
        pred: (B, num_classes, H, W) raw logits.
        target: (B, H, W) integer class indices.

    Returns:
        float: average Dice over classes 1..num_classes-1; a class that is
        absent from both prediction and ground truth scores 1.0.
    """
    labels = pred.argmax(dim=1)  # (B, H, W)

    scores = []
    for cls in range(1, num_classes):
        p = (labels == cls).float()
        t = (target == cls).float()
        overlap = (p * t).sum()
        total = p.sum() + t.sum()
        # Both empty => perfect agreement on this class.
        scores.append(1.0 if total == 0 else (2.0 * overlap / total).item())
    return sum(scores) / len(scores)
161
+
162
+
163
class MedSAM3DetectorSeg(nn.Module):
    """
    Minimal trainable segmentation model:
    SAM3 detector backbone -> lightweight decoder -> mask logits (B, num_classes, H, W)

    Notes:
    - Inputs are expected in [0,1] float. We normalize with mean/std (0.5/0.5) like SAM3.
    - We always run the SAM3 backbone at 1008x1008 (SAM3 native resolution).
    - num_classes=4 for BraTS: 0=background, 1=NCR, 2=ED, 3=ET
    """

    def __init__(self, sam3_detector: nn.Module, image_size: int = 1008, num_classes: int = 4):
        super().__init__()
        self.detector = sam3_detector
        self.image_size = int(image_size)
        self.num_classes = num_classes
        # Registered as buffers (not parameters): they follow .to(device) but are never trained.
        self.register_buffer("mean", torch.tensor([0.5, 0.5, 0.5]).view(1, 3, 1, 1))
        self.register_buffer("std", torch.tensor([0.5, 0.5, 0.5]).view(1, 3, 1, 1))

        # lightweight decoder; expects a 256-channel feature map from SAM3 backbone
        # outputs num_classes channels (4-class segmentation); three 2x upsamples = 8x total
        self.decoder = nn.Sequential(
            nn.Conv2d(256, 128, 3, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            nn.Upsample(scale_factor=2, mode="bilinear", align_corners=False),
            nn.Conv2d(128, 64, 3, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            nn.Upsample(scale_factor=2, mode="bilinear", align_corners=False),
            nn.Conv2d(64, 32, 3, padding=1),
            nn.BatchNorm2d(32),
            nn.ReLU(inplace=True),
            nn.Upsample(scale_factor=2, mode="bilinear", align_corners=False),
            nn.Conv2d(32, num_classes, 1),  # 1x1 projection to the output classes
        )

    def _preprocess(self, images: torch.Tensor) -> torch.Tensor:
        """Resize to the SAM3 native resolution and normalize. Expects float images in [0,1]."""
        # images: float in [0,1]
        _, _, h, w = images.shape
        if h != self.image_size or w != self.image_size:
            images = F.interpolate(
                images, size=(self.image_size, self.image_size), mode="bilinear", align_corners=False
            )
        images = (images - self.mean.to(images.device)) / self.std.to(images.device)
        return images

    def _pick_feat(self, backbone_out) -> torch.Tensor:
        """Pick a 4D (B, C, H, W) feature map out of the backbone output (dict or tensor)."""
        feat = None
        if isinstance(backbone_out, dict):
            # Prefer the known keys; otherwise fall back to the first 4D tensor found.
            if "sam3_features" in backbone_out:
                feat = backbone_out["sam3_features"]
            elif "features" in backbone_out:
                feat = backbone_out["features"]
            else:
                for _, v in backbone_out.items():
                    if isinstance(v, torch.Tensor) and v.ndim == 4:
                        feat = v
                        break
        elif isinstance(backbone_out, torch.Tensor):
            feat = backbone_out
        if feat is None or not isinstance(feat, torch.Tensor) or feat.ndim != 4:
            raise RuntimeError("Could not find a 4D feature map in SAM3 backbone output")
        return feat

    def forward(self, images: torch.Tensor) -> torch.Tensor:
        """Return per-class logits resampled back to the input resolution."""
        orig_h, orig_w = images.shape[-2:]
        x = self._preprocess(images)
        backbone_out = self.detector.backbone.forward_image(x)
        feat = self._pick_feat(backbone_out)
        logits = self.decoder(feat)  # (B, num_classes, ?, ?)
        # Resample so losses/metrics align with the caller's masks.
        if logits.shape[-2:] != (orig_h, orig_w):
            logits = F.interpolate(logits, size=(orig_h, orig_w), mode="bilinear", align_corners=False)
        return logits  # (B, num_classes, H, W)
237
+
238
+
239
class Trainer:
    """Training loop driver: grad-accumulated AMP training, rank-0 validation,
    best-checkpoint saving, and rank synchronization for torchrun DDP runs."""

    def __init__(
        self,
        model: nn.Module,
        train_loader: DataLoader,
        val_loader: Optional[DataLoader],
        optimizer: torch.optim.Optimizer,
        scheduler: Optional[torch.optim.lr_scheduler._LRScheduler],
        device: torch.device,
        output_dir: Path,
        grad_accum: int = 1,
        use_amp: bool = False,
        rank: int = 0,
        world_size: int = 1,
        train_sampler: Optional[DistributedSampler] = None,
    ):
        self.model = model
        self.train_loader = train_loader
        self.val_loader = val_loader
        self.optimizer = optimizer
        self.scheduler = scheduler
        self.device = device
        self.output_dir = output_dir
        self.grad_accum = max(1, int(grad_accum))
        self.use_amp = bool(use_amp)
        self.rank = rank
        self.world_size = world_size
        self.is_main = is_main_process(rank)
        self.train_sampler = train_sampler

        # Counters updated during training.
        self.global_step = 0
        self.epoch = 0
        self.best_dice = -1.0

        # Only rank 0 writes TensorBoard logs.
        self.writer = SummaryWriter(str(self.output_dir / "tensorboard")) if self.is_main else None

        self.scaler = torch.amp.GradScaler("cuda", enabled=self.use_amp) if torch.cuda.is_available() else None

    def _unwrap(self) -> nn.Module:
        """Return the underlying module when wrapped by DDP, else the model itself."""
        return self.model.module if hasattr(self.model, "module") else self.model

    def _save_lora(self, path: Path):
        """Save LoRA adapter weights plus the decoder head (both needed at inference)."""
        # Save LoRA weights on detector ONLY so that keys match inference-time `sam3_video_model.detector`.
        core = self._unwrap()
        detector = core.detector
        save_lora_weights(detector, str(path))

        # Also save the decoder weights (critical for inference!)
        decoder_path = path.parent / "best_decoder_weights.pt"
        torch.save(core.decoder.state_dict(), decoder_path)
        if self.is_main:
            print(f"Saved decoder weights to {decoder_path}")

    def train_one_epoch(self) -> Dict[str, float]:
        """Run one training epoch; returns mean loss and dice over this rank's batches."""
        self.model.train()
        if self.train_sampler is not None:
            self.train_sampler.set_epoch(self.epoch)

        total_loss = 0.0
        total_dice = 0.0
        num_steps = 0

        # Count total batches to ensure all ranks process the same number
        total_batches = len(self.train_loader)

        iterator = self.train_loader
        if self.is_main:
            iterator = tqdm(iterator, desc=f"Epoch {self.epoch}", dynamic_ncols=True)

        self.optimizer.zero_grad(set_to_none=True)

        for step_idx, batch in enumerate(iterator):
            # Accept both the image-dataset layout ("images") and the video layout
            # ("frames"), flattening video clips (B,T,...) into a (B*T,...) batch.
            if "images" in batch:
                images = batch["images"].to(self.device, non_blocking=True)  # (B,3,H,W) in [0,1]
                masks = batch["masks"].to(self.device, non_blocking=True)  # (B,H,W) {0,1}
            else:
                frames = batch["frames"].to(self.device, non_blocking=True)  # (B,T,3,H,W)
                masks_bt = batch["masks"].to(self.device, non_blocking=True)  # (B,T,H,W)
                b, t = frames.shape[:2]
                images = frames.view(b * t, *frames.shape[2:])
                masks = masks_bt.view(b * t, *masks_bt.shape[2:])

            with autocast("cuda", enabled=self.use_amp):
                logits = self.model(images)
                loss = combined_loss(logits, masks)
                # Scale down so accumulated gradients match a single large batch.
                loss = loss / self.grad_accum

            if self.use_amp and self.scaler is not None:
                self.scaler.scale(loss).backward()
            else:
                loss.backward()

            # Step the optimizer only every grad_accum micro-batches.
            do_step = ((step_idx + 1) % self.grad_accum) == 0
            if do_step:
                if self.use_amp and self.scaler is not None:
                    self.scaler.step(self.optimizer)
                    self.scaler.update()
                else:
                    self.optimizer.step()
                self.optimizer.zero_grad(set_to_none=True)
                self.global_step += 1

            with torch.no_grad():
                dice = compute_dice(logits, masks)

            # Undo the grad-accum scaling when logging the loss.
            total_loss += loss.item() * self.grad_accum
            total_dice += dice
            num_steps += 1

            if self.is_main and hasattr(iterator, "set_postfix"):
                iterator.set_postfix({"loss": f"{total_loss/num_steps:.4f}", "dice": f"{total_dice/num_steps:.4f}"})
            if self.is_main and self.writer is not None and self.global_step % 10 == 0 and do_step:
                self.writer.add_scalar("train/loss", total_loss / num_steps, self.global_step)
                self.writer.add_scalar("train/dice", total_dice / num_steps, self.global_step)
                self.writer.add_scalar("train/lr", self.optimizer.param_groups[0]["lr"], self.global_step)

        # Synchronize all ranks at the end of each epoch to prevent drift
        if self.world_size > 1:
            dist.barrier()

        return {"loss": total_loss / max(1, num_steps), "dice": total_dice / max(1, num_steps)}

    @torch.no_grad()
    def validate(self) -> Dict[str, float]:
        """Validate using the underlying model (not DDP wrapper) to avoid NCCL sync issues."""
        if self.val_loader is None:
            return {"loss": float("nan"), "dice": float("nan")}

        # Use the underlying model to avoid DDP NCCL sync during validation
        raw_model = self.model.module if hasattr(self.model, "module") else self.model
        raw_model.eval()

        total_loss = 0.0
        total_dice = 0.0
        num_steps = 0

        iterator = self.val_loader
        if self.is_main:
            iterator = tqdm(iterator, desc="Validating", dynamic_ncols=True)

        for batch in iterator:
            # Same image/video batch handling as train_one_epoch.
            if "images" in batch:
                images = batch["images"].to(self.device, non_blocking=True)
                masks = batch["masks"].to(self.device, non_blocking=True)
            else:
                frames = batch["frames"].to(self.device, non_blocking=True)
                masks_bt = batch["masks"].to(self.device, non_blocking=True)
                b, t = frames.shape[:2]
                images = frames.view(b * t, *frames.shape[2:])
                masks = masks_bt.view(b * t, *masks_bt.shape[2:])

            logits = raw_model(images)
            loss = combined_loss(logits, masks)
            dice = compute_dice(logits, masks)
            total_loss += loss.item()
            total_dice += dice
            num_steps += 1

        avg_loss = total_loss / max(1, num_steps)
        avg_dice = total_dice / max(1, num_steps)
        if self.is_main and self.writer is not None:
            self.writer.add_scalar("val/loss", avg_loss, self.global_step)
            self.writer.add_scalar("val/dice", avg_dice, self.global_step)
        return {"loss": avg_loss, "dice": avg_dice}

    def train(self, epochs: int, val_freq: int):
        """Full training run: epochs of training, periodic rank-0 validation, and
        best-dice checkpointing (LoRA + decoder + optimizer state)."""
        ckpt_dir = self.output_dir / "checkpoints"
        ckpt_dir.mkdir(parents=True, exist_ok=True)

        if self.is_main:
            print(f"Output dir: {self.output_dir}")
            print(f"World size: {self.world_size}")

        for ep in range(int(epochs)):
            self.epoch = ep
            train_m = self.train_one_epoch()
            if self.is_main:
                print(f"Epoch {ep}: train_loss={train_m['loss']:.4f}, train_dice={train_m['dice']:.4f}")
            if self.scheduler is not None:
                self.scheduler.step()

            is_best = False
            # Only rank0 does validation (using raw model, no DDP sync needed).
            if (ep + 1) % int(val_freq) == 0 and self.is_main:
                val_m = self.validate()
                print(f"Epoch {ep}: val_loss={val_m['loss']:.4f}, val_dice={val_m['dice']:.4f}")
                if val_m["dice"] > self.best_dice:
                    self.best_dice = val_m["dice"]
                    is_best = True
                    print(f" New best val dice: {self.best_dice:.4f}")

            # Save ONLY best checkpoint (rank0 only) to avoid writing many epoch checkpoints.
            if self.is_main and is_best:
                self._save_lora(ckpt_dir / "best_lora_weights.pt")
                torch.save(
                    {
                        "epoch": ep,
                        "global_step": self.global_step,
                        "best_dice": self.best_dice,
                        "optimizer": self.optimizer.state_dict(),
                        "scheduler": self.scheduler.state_dict() if self.scheduler is not None else None,
                    },
                    ckpt_dir / "best_trainer_state.pt",
                )

            # Synchronize all ranks after validation/save to prevent drift between ranks
            # This barrier ensures rank 0 (which may do validation/save) catches up with others
            if self.world_size > 1:
                dist.barrier()

        if self.is_main and self.writer is not None:
            self.writer.close()
451
+
452
+
453
def main():
    """Entry point: parse CLI args, build BraTS datasets/loaders, load SAM3,
    inject LoRA adapters, and run (optionally DDP) training."""
    parser = argparse.ArgumentParser(description="SAM3 video(detector)+LoRA finetuning for BraTS (DDP-ready)")

    # data
    parser.add_argument(
        "--data_root",
        type=str,
        default="/data/yty/brats2023/ASNR-MICCAI-BraTS2023-GLI-Challenge-TrainingData",
    )
    parser.add_argument("--dataset_type", type=str, default="video", choices=["image", "video"])
    parser.add_argument("--modality", type=int, default=0, help="0=t1c, 1=t1n, 2=t2f, 3=t2w")
    parser.add_argument("--target_size", type=int, nargs=2, default=[512, 512])
    parser.add_argument("--num_frames", type=int, default=8)
    parser.add_argument("--frame_stride", type=int, default=1)
    parser.add_argument("--split_json", type=str, default="", help="Optional splits.json (case-level). If set, ratios are ignored.")
    parser.add_argument("--train_ratio", type=float, default=0.7)
    parser.add_argument("--val_ratio", type=float, default=0.1)
    parser.add_argument("--test_ratio", type=float, default=0.2)

    # model / lora
    parser.add_argument("--checkpoint", type=str, default="/data/yty/sam3/sam3.pt")
    parser.add_argument("--lora_rank", type=int, default=8)
    parser.add_argument("--lora_alpha", type=float, default=16.0)
    parser.add_argument("--lora_dropout", type=float, default=0.1)
    parser.add_argument(
        "--lora_target_modules",
        type=str,
        default="q_proj,k_proj,v_proj,out_proj,qkv,proj",
        help="Comma-separated module name substrings to inject LoRA into.",
    )

    # train
    parser.add_argument("--epochs", type=int, default=50)
    parser.add_argument("--batch_size", type=int, default=1, help="Per-GPU batch size")
    parser.add_argument("--lr", type=float, default=1e-4)
    parser.add_argument("--weight_decay", type=float, default=0.01)
    parser.add_argument("--grad_accum", type=int, default=1)
    parser.add_argument("--num_workers", type=int, default=4)
    parser.add_argument("--val_freq", type=int, default=5)
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--use_amp", action="store_true", help="Enable AMP (saves VRAM, may change numerics).")

    # io
    parser.add_argument("--output_dir", type=str, default="/data/yty/brats23_sam3_video_lora_output")
    parser.add_argument("--resume_lora", type=str, default="", help="Path to lora_weights.pt to resume adapters.")

    args = parser.parse_args()

    torch.manual_seed(args.seed)
    np.random.seed(args.seed)

    distributed, rank, world_size, local_rank = setup_distributed()
    device = setup_device(local_rank=local_rank)

    # Record the run configuration (rank 0 only).
    if is_main_process(rank):
        Path(args.output_dir).mkdir(parents=True, exist_ok=True)
        with open(Path(args.output_dir) / "config.json", "w") as f:
            json.dump({**vars(args), "timestamp": datetime.now().isoformat()}, f, indent=2)

    # datasets (we output images in [0,1] and let the model normalize)
    target_size = tuple(args.target_size)
    split_json = args.split_json.strip() or None
    if args.dataset_type == "video":
        train_ds = BraTSVideoDataset(
            data_root=args.data_root,
            split="train",
            modality=args.modality,
            target_size=target_size,
            num_frames=args.num_frames,
            frame_stride=args.frame_stride,
            augment=True,
            train_ratio=args.train_ratio,
            val_ratio=args.val_ratio,
            test_ratio=args.test_ratio,
            seed=args.seed,
            split_json=split_json,
            normalize_mean=(0.0, 0.0, 0.0),
            normalize_std=(1.0, 1.0, 1.0),
        )
        val_ds = BraTSVideoDataset(
            data_root=args.data_root,
            split="val",
            modality=args.modality,
            target_size=target_size,
            num_frames=args.num_frames,
            frame_stride=args.frame_stride,
            augment=False,
            train_ratio=args.train_ratio,
            val_ratio=args.val_ratio,
            test_ratio=args.test_ratio,
            seed=args.seed,
            split_json=split_json,
            normalize_mean=(0.0, 0.0, 0.0),
            normalize_std=(1.0, 1.0, 1.0),
        )
    else:
        train_ds = BraTSImageDataset(
            data_root=args.data_root,
            split="train",
            modality=args.modality,
            target_size=target_size,
            augment=True,
            train_ratio=args.train_ratio,
            val_ratio=args.val_ratio,
            test_ratio=args.test_ratio,
            seed=args.seed,
            split_json=split_json,
            normalize_mean=(0.0, 0.0, 0.0),
            normalize_std=(1.0, 1.0, 1.0),
        )
        val_ds = BraTSImageDataset(
            data_root=args.data_root,
            split="val",
            modality=args.modality,
            target_size=target_size,
            augment=False,
            train_ratio=args.train_ratio,
            val_ratio=args.val_ratio,
            test_ratio=args.test_ratio,
            seed=args.seed,
            split_json=split_json,
            normalize_mean=(0.0, 0.0, 0.0),
            normalize_std=(1.0, 1.0, 1.0),
        )

    train_sampler = DistributedSampler(train_ds, shuffle=True, seed=args.seed, drop_last=True) if distributed else None
    train_loader = DataLoader(
        train_ds,
        batch_size=args.batch_size,
        shuffle=(train_sampler is None),
        sampler=train_sampler,
        num_workers=args.num_workers,
        pin_memory=True,
        collate_fn=collate_fn_brats,
        drop_last=True,
    )

    # validation only on rank0 (simple & stable)
    val_loader = None
    if is_main_process(rank):
        val_loader = DataLoader(
            val_ds,
            batch_size=args.batch_size,
            shuffle=False,
            num_workers=args.num_workers,
            pin_memory=True,
            collate_fn=collate_fn_brats,
            drop_last=False,
        )

    # build SAM3 video model, then take its detector
    from sam3.model_builder import build_sam3_video_model

    if is_main_process(rank):
        print("Loading SAM3 video model...")
    sam3_video = build_sam3_video_model(
        checkpoint_path=args.checkpoint,
        load_from_HF=False,
        device=str(device),
        apply_temporal_disambiguation=True,
    )
    detector = sam3_video.detector

    # create trainable wrapper (detector is part of it)
    # num_classes=4 for BraTS: background(0) + NCR(1) + ED(2) + ET(3)
    model = MedSAM3DetectorSeg(detector, image_size=1008, num_classes=4).to(device)

    # inject LoRA into detector only (avoid unused params from tracker)
    target_modules = [s.strip() for s in args.lora_target_modules.split(",") if s.strip()]
    if is_main_process(rank):
        print(f"Applying LoRA to detector: rank={args.lora_rank}, alpha={args.lora_alpha}, targets={target_modules}")
    apply_lora_to_model(
        model.detector,
        rank=args.lora_rank,
        alpha=args.lora_alpha,
        dropout=args.lora_dropout,
        target_modules=target_modules,
        exclude_modules=[],
    )

    if args.resume_lora:
        if is_main_process(rank):
            print(f"Loading LoRA weights from: {args.resume_lora}")
        load_lora_weights(model.detector, args.resume_lora)

    # IMPORTANT: LoRA modules are created on CPU by default; move them to the target device
    # before wrapping with DDP (DDP requires all params on the same device type).
    model = model.to(device)

    # freeze everything except LoRA + decoder
    for name, p in model.named_parameters():
        if "lora_" in name or name.startswith("decoder."):
            p.requires_grad = True
        else:
            p.requires_grad = False

    if is_main_process(rank):
        stats = count_parameters(model)
        print(
            f"Params total={stats['total']:,} trainable={stats['trainable']:,} "
            f"ratio={stats['trainable_ratio']:.4%}"
        )

    trainable_params = [p for p in model.parameters() if p.requires_grad]
    optimizer = torch.optim.AdamW(trainable_params, lr=args.lr, weight_decay=args.weight_decay)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.epochs, eta_min=args.lr * 0.01)

    if distributed:
        # We inject LoRA into the detector broadly; some injected layers may not be exercised by
        # our simplified forward (we only use `detector.backbone.forward_image`), so we must
        # enable unused-parameter detection to avoid DDP reduction errors.
        model = DDP(model, device_ids=[local_rank], output_device=local_rank, find_unused_parameters=True)

    trainer = Trainer(
        model=model,
        train_loader=train_loader,
        val_loader=val_loader,
        optimizer=optimizer,
        scheduler=scheduler,
        device=device,
        output_dir=Path(args.output_dir),
        grad_accum=args.grad_accum,
        use_amp=args.use_amp,
        rank=rank,
        world_size=world_size,
        train_sampler=train_sampler,
    )

    trainer.train(epochs=args.epochs, val_freq=args.val_freq)

    cleanup_distributed()
684
+
685
+
686
# Script entry point (launch directly or via torchrun for DDP).
if __name__ == "__main__":
    main()
688
+
689
+
source_code/sam3/sam3.egg-info/SOURCES.txt ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ LICENSE
2
+ MANIFEST.in
3
+ README.md
4
+ pyproject.toml
5
+ examples/saco_gold_silver_eval_example.ipynb
6
+ examples/saco_gold_silver_vis_example.ipynb
7
+ examples/saco_veval_eval_example.ipynb
8
+ examples/saco_veval_vis_example.ipynb
9
+ examples/sam3_agent.ipynb
10
+ examples/sam3_for_sam1_task_example.ipynb
11
+ examples/sam3_for_sam2_video_task_example.ipynb
12
+ examples/sam3_image_batched_inference.ipynb
13
+ examples/sam3_image_interactive.ipynb
14
+ examples/sam3_image_predictor_example.ipynb
15
+ examples/sam3_video_predictor_example.ipynb
16
+ sam3/__init__.py
17
+ sam3/logger.py
18
+ sam3/model_builder.py
19
+ sam3/visualization_utils.py
20
+ sam3.egg-info/PKG-INFO
21
+ sam3.egg-info/SOURCES.txt
22
+ sam3.egg-info/dependency_links.txt
23
+ sam3.egg-info/requires.txt
24
+ sam3.egg-info/top_level.txt
25
+ sam3/model/__init__.py
26
+ sam3/model/act_ckpt_utils.py
27
+ sam3/model/box_ops.py
28
+ sam3/model/data_misc.py
29
+ sam3/model/decoder.py
30
+ sam3/model/edt.py
31
+ sam3/model/encoder.py
32
+ sam3/model/geometry_encoders.py
33
+ sam3/model/io_utils.py
34
+ sam3/model/maskformer_segmentation.py
35
+ sam3/model/memory.py
36
+ sam3/model/model_misc.py
37
+ sam3/model/necks.py
38
+ sam3/model/position_encoding.py
39
+ sam3/model/sam1_task_predictor.py
40
+ sam3/model/sam3_image.py
41
+ sam3/model/sam3_image_processor.py
42
+ sam3/model/sam3_tracker_base.py
43
+ sam3/model/sam3_tracker_utils.py
44
+ sam3/model/sam3_tracking_predictor.py
45
+ sam3/model/sam3_video_base.py
46
+ sam3/model/sam3_video_inference.py
47
+ sam3/model/sam3_video_predictor.py
48
+ sam3/model/text_encoder_ve.py
49
+ sam3/model/tokenizer_ve.py
50
+ sam3/model/vitdet.py
51
+ sam3/model/vl_combiner.py
source_code/sam3/sam3/agent/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
source_code/sam3/sam3/agent/client_llm.py ADDED
@@ -0,0 +1,205 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
2
+
3
+ import base64
4
+ import os
5
+ from typing import Any, Optional
6
+
7
+ from openai import OpenAI
8
+
9
+
10
def get_image_base64_and_mime(image_path):
    """Return (base64_data, mime_type) for an image file, or (None, None) on failure.

    The MIME type is inferred from the file extension and defaults to
    ``image/jpeg`` for unknown extensions. Failures to read the file are
    printed and reported as ``(None, None)`` rather than raised, matching
    the callers' best-effort handling.
    """
    # MIME type comes from the extension only; computing it cannot fail,
    # so keep it outside the try block (the original wrapped everything in
    # a blanket `except Exception`, which could mask programming errors).
    ext = os.path.splitext(image_path)[1].lower()
    mime_types = {
        ".jpg": "image/jpeg",
        ".jpeg": "image/jpeg",
        ".png": "image/png",
        ".gif": "image/gif",
        ".webp": "image/webp",
        ".bmp": "image/bmp",
    }
    mime_type = mime_types.get(ext, "image/jpeg")  # Default to JPEG

    # Only the file read can legitimately fail; catch OS-level errors
    # (missing file, permissions, ...) instead of every exception.
    try:
        with open(image_path, "rb") as image_file:
            base64_data = base64.b64encode(image_file.read()).decode("utf-8")
    except OSError as e:
        print(f"Error converting image to base64: {e}")
        return None, None
    return base64_data, mime_type
32
+
33
+
34
def send_generate_request(
    messages,
    server_url=None,
    model="meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
    api_key=None,
    max_tokens=4096,
):
    """
    Sends a request to the OpenAI-compatible API endpoint using the OpenAI client library.

    Image entries in user messages ({"type": "image", "image": <path>}) are
    converted in place to base64 data-URL "image_url" entries; images that
    cannot be read are skipped with a warning.

    Args:
        server_url (str): The base URL of the server, e.g. "http://127.0.0.1:8000"
        messages (list): A list of message dicts, each containing role and content.
        model (str): The model to use for generation (default: "llama-4")
        api_key (str, optional): API key forwarded to the OpenAI client.
        max_tokens (int): Maximum number of tokens to generate (default: 4096)

    Returns:
        str: The generated response text from the server, or None on failure.
    """
    # Process messages to convert image paths to base64
    processed_messages = []
    for message in messages:
        # Shallow copy: only the "content" entry is replaced below.
        processed_message = message.copy()
        if message["role"] == "user" and "content" in message:
            processed_content = []
            for c in message["content"]:
                if isinstance(c, dict) and c.get("type") == "image":
                    # Convert image path to base64 format
                    image_path = c["image"]

                    print("image_path", image_path)
                    new_image_path = image_path.replace(
                        "?", "%3F"
                    )  # Escape ? in the path

                    # Read the image file and convert to base64
                    try:
                        base64_image, mime_type = get_image_base64_and_mime(
                            new_image_path
                        )
                        if base64_image is None:
                            print(
                                f"Warning: Could not convert image to base64: {new_image_path}"
                            )
                            continue

                        # Create the proper image_url structure with base64 data
                        processed_content.append(
                            {
                                "type": "image_url",
                                "image_url": {
                                    "url": f"data:{mime_type};base64,{base64_image}",
                                    "detail": "high",
                                },
                            }
                        )

                    except FileNotFoundError:
                        print(f"Warning: Image file not found: {new_image_path}")
                        continue
                    except Exception as e:
                        print(f"Warning: Error processing image {new_image_path}: {e}")
                        continue
                else:
                    # Non-image content (e.g. text parts) passes through unchanged.
                    processed_content.append(c)

            processed_message["content"] = processed_content
        processed_messages.append(processed_message)

    # Create OpenAI client with custom base URL
    client = OpenAI(api_key=api_key, base_url=server_url)

    try:
        print(f"🔍 Calling model {model}...")
        response = client.chat.completions.create(
            model=model,
            messages=processed_messages,
            max_completion_tokens=max_tokens,
            n=1,
        )
        # print(f"Received response: {response.choices[0].message}")

        # Extract the response content
        if response.choices and len(response.choices) > 0:
            return response.choices[0].message.content
        else:
            print(f"Unexpected response format: {response}")
            return None

    except Exception as e:
        # Best-effort: network/API errors are reported, not raised.
        print(f"Request failed: {e}")
        return None
126
+
127
+
128
def send_direct_request(
    llm: Any,
    messages: list[dict[str, Any]],
    sampling_params: Any,
) -> Optional[str]:
    """
    Run inference on a vLLM model instance directly without using a server.

    Any ``{"type": "image", "image": <path>}`` entries inside user messages are
    converted to base64-encoded ``image_url`` entries before being passed to
    vLLM's chat interface.

    Args:
        llm: Initialized vLLM LLM instance (passed from external initialization)
        messages: List of message dicts with role and content (OpenAI format)
        sampling_params: vLLM SamplingParams instance (initialized externally)

    Returns:
        str: Generated response text, or None if inference fails
    """
    try:
        # Process messages to handle images (convert to base64 if needed)
        processed_messages = []
        for message in messages:
            processed_message = message.copy()
            content = message.get("content")
            # Only structured (list-of-parts) user content can contain image
            # entries. Iterating a plain-string content would wrongly split it
            # into single characters, so string content is left untouched.
            if message["role"] == "user" and isinstance(content, list):
                processed_content = []
                for c in content:
                    if isinstance(c, dict) and c.get("type") == "image":
                        # Convert image path to base64 format
                        image_path = c["image"]
                        new_image_path = image_path.replace("?", "%3F")

                        try:
                            base64_image, mime_type = get_image_base64_and_mime(
                                new_image_path
                            )
                            if base64_image is None:
                                print(
                                    f"Warning: Could not convert image: {new_image_path}"
                                )
                                continue

                            # vLLM expects image_url format
                            processed_content.append(
                                {
                                    "type": "image_url",
                                    "image_url": {
                                        "url": f"data:{mime_type};base64,{base64_image}"
                                    },
                                }
                            )
                        except Exception as e:
                            print(
                                f"Warning: Error processing image {new_image_path}: {e}"
                            )
                            continue
                    else:
                        processed_content.append(c)

                processed_message["content"] = processed_content
            processed_messages.append(processed_message)

        print("🔍 Running direct inference with vLLM...")

        # Run inference using vLLM's chat interface
        outputs = llm.chat(
            messages=processed_messages,
            sampling_params=sampling_params,
        )

        # Extract the generated text from the first output
        if outputs and len(outputs) > 0:
            generated_text = outputs[0].outputs[0].text
            return generated_text
        else:
            print(f"Unexpected output format: {outputs}")
            return None

    except Exception as e:
        print(f"Direct inference failed: {e}")
        return None
source_code/sam3/sam3/agent/client_sam3.py ADDED
@@ -0,0 +1,138 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
2
+
3
+ import json
4
+ import os
5
+
6
+ import torch
7
+ from PIL import Image
8
+
9
+ from sam3.model.box_ops import box_xyxy_to_xywh
10
+ from sam3.train.masks_ops import rle_encode
11
+
12
+ from .helpers.mask_overlap_removal import remove_overlapping_masks
13
+ from .viz import visualize
14
+
15
+
16
def sam3_inference(processor, image_path, text_prompt):
    """Run SAM 3 image inference with text prompts and format the outputs"""
    image = Image.open(image_path)
    orig_img_w, orig_img_h = image.size

    # Run the model: encode the image, then prompt it with the text query.
    inference_state = processor.set_image(image)
    inference_state = processor.set_text_prompt(
        state=inference_state, prompt=text_prompt
    )

    # Normalize predicted xyxy boxes to [0, 1] by dividing each coordinate by
    # the image dimensions; new_tensor keeps dtype/device consistent.
    raw_boxes = inference_state["boxes"]
    scale = raw_boxes.new_tensor(
        [orig_img_w, orig_img_h, orig_img_w, orig_img_h]
    )
    pred_boxes_xyxy = raw_boxes / scale

    # Convert to xywh and RLE-encode the predicted masks for JSON transport.
    pred_boxes_xywh = box_xyxy_to_xywh(pred_boxes_xyxy).tolist()
    rles = rle_encode(inference_state["masks"].squeeze(1))
    pred_masks = [rle["counts"] for rle in rles]

    return {
        "orig_img_h": orig_img_h,
        "orig_img_w": orig_img_w,
        "pred_boxes": pred_boxes_xywh,
        "pred_masks": pred_masks,
        "pred_scores": inference_state["scores"].tolist(),
    }
+ return outputs
48
+
49
+
50
def call_sam_service(
    sam3_processor,
    image_path: str,
    text_prompt: str,
    output_folder_path: str = "sam3_output",
):
    """
    Loads an image, sends it with a text prompt to the service,
    saves the results, and renders the visualization.

    Args:
        sam3_processor: Initialized SAM 3 processor used for inference.
        image_path: Path to the input image.
        text_prompt: Noun phrase to ground in the image.
        output_folder_path: Root folder where JSON results and visualizations
            are written (default: "sam3_output").

    Returns:
        str: Path of the output JSON file. NOTE: the path is returned even if
            inference failed, in which case no file was written.
    """
    print(f"📞 Loading image '{image_path}' and sending with prompt '{text_prompt}'...")

    # Sanitize path components: '/' in the prompt or image path would
    # otherwise create unintended nested directories.
    text_prompt_for_save_path = text_prompt.replace("/", "_")
    image_dir_name = image_path.replace("/", "-")

    output_dir = os.path.join(output_folder_path, image_dir_name)
    os.makedirs(output_dir, exist_ok=True)
    output_json_path = os.path.join(output_dir, f"{text_prompt_for_save_path}.json")
    output_image_path = os.path.join(output_dir, f"{text_prompt_for_save_path}.png")

    try:
        # Run SAM 3 inference on the image with the text prompt
        serialized_response = sam3_inference(sam3_processor, image_path, text_prompt)

        # 1. Prepare the response dictionary (drop duplicate/overlapping
        # masks, record provenance paths)
        serialized_response = remove_overlapping_masks(serialized_response)
        serialized_response = {
            "original_image_path": image_path,
            "output_image_path": output_image_path,
            **serialized_response,
        }

        # 2. Reorder predictions by scores (highest to lowest) if scores are available
        if serialized_response.get("pred_scores"):
            # Indices sorted by score, descending; applied to all three
            # prediction lists to keep them aligned.
            score_indices = sorted(
                range(len(serialized_response["pred_scores"])),
                key=lambda i: serialized_response["pred_scores"][i],
                reverse=True,
            )
            for key in ("pred_scores", "pred_boxes", "pred_masks"):
                serialized_response[key] = [
                    serialized_response[key][i] for i in score_indices
                ]

        # 3. Remove any invalid RLE masks that are too short (shorter than 5
        # characters), keeping boxes/scores aligned with the surviving masks.
        keep = [
            i
            for i, rle in enumerate(serialized_response["pred_masks"])
            if len(rle) > 4
        ]
        for key in ("pred_masks", "pred_boxes", "pred_scores"):
            serialized_response[key] = [serialized_response[key][i] for i in keep]

        with open(output_json_path, "w") as f:
            json.dump(serialized_response, f, indent=4)
        print(f"✅ Raw JSON response saved to '{output_json_path}'")

        # 4. Render and save visualizations on the image and save it in the SAM3 output folder
        print("🔍 Rendering visualizations on the image ...")
        viz_image = visualize(serialized_response)
        viz_image.save(output_image_path)
        print("✅ Saved visualization at:", output_image_path)
    except Exception as e:
        # Best-effort: log and fall through so callers still get the path.
        print(f"❌ Error calling service: {e}")

    return output_json_path
source_code/sam3/sam3/agent/helpers/__init__.py ADDED
@@ -0,0 +1 @@
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
source_code/sam3/sam3/agent/helpers/memory.py ADDED
@@ -0,0 +1,87 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
2
+
3
+ import logging
4
+ from contextlib import contextmanager
5
+ from functools import wraps
6
+
7
+ import torch
8
+
9
+ __all__ = ["retry_if_cuda_oom"]
10
+
11
+
12
+ @contextmanager
13
+ def _ignore_torch_cuda_oom():
14
+ """
15
+ A context which ignores CUDA OOM exception from pytorch.
16
+ """
17
+ try:
18
+ yield
19
+ except RuntimeError as e:
20
+ # NOTE: the string may change?
21
+ if "CUDA out of memory. " in str(e):
22
+ pass
23
+ else:
24
+ raise
25
+
26
+
27
+ def retry_if_cuda_oom(func):
28
+ """
29
+ Makes a function retry itself after encountering
30
+ pytorch's CUDA OOM error.
31
+ It will first retry after calling `torch.cuda.empty_cache()`.
32
+
33
+ If that still fails, it will then retry by trying to convert inputs to CPUs.
34
+ In this case, it expects the function to dispatch to CPU implementation.
35
+ The return values may become CPU tensors as well and it's user's
36
+ responsibility to convert it back to CUDA tensor if needed.
37
+
38
+ Args:
39
+ func: a stateless callable that takes tensor-like objects as arguments
40
+
41
+ Returns:
42
+ a callable which retries `func` if OOM is encountered.
43
+
44
+ Examples:
45
+ ::
46
+ output = retry_if_cuda_oom(some_torch_function)(input1, input2)
47
+ # output may be on CPU even if inputs are on GPU
48
+
49
+ Note:
50
+ 1. When converting inputs to CPU, it will only look at each argument and check
51
+ if it has `.device` and `.to` for conversion. Nested structures of tensors
52
+ are not supported.
53
+
54
+ 2. Since the function might be called more than once, it has to be
55
+ stateless.
56
+ """
57
+
58
+ def maybe_to_cpu(x):
59
+ try:
60
+ like_gpu_tensor = x.device.type == "cuda" and hasattr(x, "to")
61
+ except AttributeError:
62
+ like_gpu_tensor = False
63
+ if like_gpu_tensor:
64
+ return x.to(device="cpu")
65
+ else:
66
+ return x
67
+
68
+ @wraps(func)
69
+ def wrapped(*args, **kwargs):
70
+ with _ignore_torch_cuda_oom():
71
+ return func(*args, **kwargs)
72
+
73
+ # Clear cache and retry
74
+ torch.cuda.empty_cache()
75
+ with _ignore_torch_cuda_oom():
76
+ return func(*args, **kwargs)
77
+
78
+ # Try on CPU. This slows down the code significantly, therefore print a notice.
79
+ logger = logging.getLogger(__name__)
80
+ logger.info(
81
+ "Attempting to copy inputs of {} to CPU due to CUDA OOM".format(str(func))
82
+ )
83
+ new_args = (maybe_to_cpu(x) for x in args)
84
+ new_kwargs = {k: maybe_to_cpu(v) for k, v in kwargs.items()}
85
+ return func(*new_args, **new_kwargs)
86
+
87
+ return wrapped
source_code/sam3/sam3/agent/system_prompts/system_prompt.txt ADDED
@@ -0,0 +1,242 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ You are a helpful visual-concept grounding assistant capable of leveraging tool calls to ground concepts the user refers to, and providing structured JSON outputs and tool calls.
2
+ The user may provide you with a referring expression that matches some part(s) of the image, or a question whose answer points to some part(s) of the image.
3
+ You should observe and analyze the image along with the initial user input query very carefully, note all details in the image, think about what the user is actually referring to, how to leverage existing tools below to ground the target(s), and then call exactly one tool per turn.
4
+ At each turn, all available mask(s) will be renumbered and re-rendered on the most recent image provided to you. The numbering and coloring can be different from previous turns. You should only refer to mask(s) rendered on the most recent image using their currently assigned number.
5
+ If a tool call does not produce the intended output, do not give up; be creative and try calling the segment_phrase tool again with different parameters, or try a different tool. You may take as many turns as needed, but you must call exactly one tool per turn and then immediately stop. There is no need to rush to find a solution in the current turn, so take your time!
6
+
7
+
8
+ How you should understand the initial user input query and the raw input image:
9
+
10
+ 1. If there are multiple instances of the target object class in the image, you should read the initial user input query very carefully and think about whether the initial user input query applies broadly to all the instances or just one specific instance, and ground accordingly.
11
+ 2. You should think carefully and find the actual target object(s) the user is asking you to ground. Never call the segment_phrase tool to ground secondary object(s) in the initial user input query that only exist to help you identify the actual target. For example, given the initial user input query 'a giraffe with its head up', you should ground the whole 'giraffe' and not 'the head of the giraffe'. Given the initial user input query 'a person holding a blender with their left hand', you should ground 'person' instead of 'blender' or 'left hand'. Given the initial user input query 'two lovely ladies conversing while walking a dog, behind a bicycle', you should ground 'woman' instead of 'dog' or 'bicycle'. Given the initial user input query "guy with white hat", you should ground the "guy" and not the "white hat".
12
+ 3. Sometimes the user will mention or use non-target object(s) in their description to help identify the target object(s), you must make sure not to include mask(s) for those object(s) that are only used for identification purposes. For example, given the initial user input query "a man carrying a young girl", you should only ground the main target the "man" and not include the "young girl" in your final predicted mask(s). Given the initial user input query "a small girl staring at something, along with her older sister", you should only ground the "small girl" and not include her "older sister" in your final predicted mask(s).
13
+ 4. Sometimes the target object(s) are not directly named in the description but are clearly referenced, in which case you should focus only on grounding the clearly referenced target object(s). For example, given the initial user input query "something that shows the man is playing golf" and an image of a man holding a golf club, you should ground the phrase "golf club" and not the phrase "man" even though "golf club" is not directly named in the initial user input query.
14
+ 5. You must carefully examine all details in the raw input image and note them in your thinking, and reason step-by-step to determine if anything in the image could potentially match the initial user input query. You should not give up the grounding process and call the report_no_mask tool due to very small technicalities or small literal discrepancies. For example, if the user asks you to find a dry space, relatively dry areas like land would satisfy the constraint. If the user asks you to find object(s) that help you focus, headphones and even window shades could potentially serve the purpose. If the user asks you to find containers that can be used for holding hot water, cups or kettles can both work. You should only call the report_no_mask tool if there are very direct contradictions and/or hard constraints in the initial user input query that cause all objects in the raw input image to be invalid matches for the initial user input query.
15
+ 6. Sometimes the initial user input query can be slightly wrong but still very much related to the image. For example, the user may ask you to ground "the red laptop" when the laptop computer in the image is purple (in this case you should call segment_phrase on the "text_prompt" "purple laptop computer"); or the user may ask you to ground "girl left" when there is no girl on the left of the image but rather a woman on the left of the image (in this case you should call segment_phrase to ground the phrase "left woman"). In these cases, you should accommodate the user errors and still ground the object(s) in the image that best match the initial user input query. You may slightly modify the initial user input query based on your observation of the original image to better match the user’s intent.
16
+ 7. Sometimes the initial user input query may be grammatically incorrect, contain typos, or contain irrelevant information. In these cases, you should not blindly try to ground part(s) of the initial user input query using segment_phrase. Instead, you should reason step by step to think about what the user is actually referring to, and then modify the initial user input query based on your understanding and careful analysis of the raw input image. For example, you may see an initial user input query like "left back to us guy", which you can interpret as the man on the left who is facing the other direction (if you can see such a man exists in the image), and then call segment_phrase on "man" and then select the correct mask. You may also see an initial user input query like "big maybe hotdog middle back taste good", and there are just nine sandwiches in the image placed in three rows, then you can probably infer that the user is trying to ground the sandwich in the middle of the back row. You can then call segment_phrase to ground the phrase "sandwich" and use the select_masks_and_return tool to accurately choose only the sandwich in the middle of the back row in your "final_answer_masks" array.
17
+ 8. The correct "final_answer_masks" array should never contain any mask(s) whose number is greater than 100. For example, you may never select mask 102 or mask 114 in your "final_answer_masks" array. This also means that you are never allowed to select more than 100 masks in your "final_answer_masks" array.
18
+ 9. Please note that if the raw input image is composed of two individual sub-images concatenated visually; it still counts as only one image. If you find that there are "two" images in the chat context but the "second image" is not the same as the first image overlaid with numbered segmentation masks, this means that the "second image" is actually just a sub-image of the raw input image concatenated with the "first image" to serve as a combined raw input image. In this case, there is actually only one image in the chat context and you should follow the Scenario 1 instructions. This is very important!
19
+
20
+ You should always follow the response format defined below and complete the Steps for Each Turn as specified below. Never break the specified format for any reason.
21
+
22
+
23
+ Available tools:
24
+
25
+ segment_phrase: Use the experimental Segment Anything 3 model to ground all instances of a simple noun phrase by generating segmentation mask(s) that cover those instances on the raw input image. At the same time, all previously generated mask(s) will be deleted and cannot be referred to in future messages.
26
+ Use cases: "Given a simple, direct, and singular noun phrase (not a referring expression that requires additional understanding/reasoning), segment_phrase will try to locate all object instance(s) on the raw input image that match the simple noun phrase you provided. The tool will also render all of the generated segmentation mask(s) onto the image for you to examine and decide the next step."
27
+ Parameters for segment_phrase: {"type": "object", "properties": {"text_prompt": {"type": "string", "description": "A short and simple noun phrase, e.g., rope, bird beak, speed monitor, brown handbag, person torso"}}, "required": ["text_prompt"]}
28
+ Return type: A new image with differently colored segmentation mask(s) rendered on it, and a text message indicating the number of mask(s) generated by the experimental Segment Anything 3 model for this "text_prompt" only.
29
+ Important rules for using the segment_phrase tool:
30
+ 1. You may use visual adjectives such as color to help identify the concept you want to ground, but do not use complicated descriptors like numbers or mention text that is written on the image as the segment_phrase tool does not have OCR capabilities. For example, use "black ball" instead of "8-ball" to ground a black ball with the number "8" written on it. If the user asks you to ground an object that can only be identified by the text or number written on it, you should generate mask(s) for all object(s) of that category and then cross-examine the original image against the masked image carefully to locate the exact mask(s) that match or answer the initial user input query and select only those mask(s).
31
+ 2. Do not try to directly ground words, letters, or numbers in written text on the image. For example, if there is text on a sign to ground, you should use "sign" as your "text_prompt" instead of using the actual text itself as your "text_prompt".
32
+ 3. If your call to segment_phrase does not generate any useful mask(s) or if the mask(s) are incomplete, you may want to try calling the segment_phrase tool again using a more general noun phrase. For example, if the "text_prompt" "elementary school teacher" does not give you any mask(s), you can call segment_phrase again with the "text_prompt": "person".
33
+ 4. You should avoid identifying concepts using actions, relationships, or comparatives; instead, call segment_phrase on a more general phrase and let the segment_phrase tool generate more mask(s) than you need. Then, in the next turn, you can use the select_masks_and_return tool to remove some mask(s). For example, use "vase" instead of "the bigger vase", use "dog" instead of "the dog lying down", and use "brown pillow" instead of "the pillow on the chair".
34
+ 5. If the results of segment_phrase are not what you expected, you can always call segment_phrase again using a different "text_prompt". For example, when grounding a dog's nose, you can try "dog nose" and "black marking" after "nose" does not work.
35
+ 6. Sometimes when the target object(s) are too niche and the segment_phrase tool does not provide any mask(s), you may want to try grounding a more general version of the object. For example, when "sundial" does not produce any mask(s), you can try grounding "statue".
36
+ 7. Be concise and get the right keywords; don't make your "text_prompt" long.
37
+ 8. Do not ever use the exact same "text_prompt" more than once. This is very important!
38
+ 9. Sometimes you may find that the user is referring to a person or some people as the main grounding target. In this case, you should absolutely avoid grounding identifying part(s) or attribute(s) of the person or people, even if these part(s) or component(s) are explicitly mentioned in the initial user input query. Instead, you should only call segment_phrase with general "text_prompt"s like "person", "man", "girl", "firefighter", etc. that refer to the person as a whole. Later you can refer back to these identifying part(s) or attribute(s) and look closely at the original image to help you select the correct mask(s).
39
+ 10. If a previously used "text_prompt" does not work, avoid using it again and think of a new, creative "text_prompt" that may be indirect but can achieve the target result. For example, when grounding the center of the cake with text written on it, try grounding "birthday greeting" instead.
40
+ 11. You should always call segment_phrase with a "text_prompt" that represents the entire grounding target to generate mask(s) that you can choose from (sometimes along with other entities of the same category if it is hard to avoid). Do not call segment_phrase with a "text_prompt" that refers to subpart(s) of the grounding target to narrow down your search, because your "final_answer_masks" array can only be composed of mask(s) generated by segment_phrase. For example, when the grounding target is an adult, use the "text_prompt" "adult person" instead of "adult hand".
41
+ 12. If the initial user input query refers only to one specific object instance of a category, while there are other object instance(s) of the same category in the image that are not being referred to, you should call segment_phrase with a "text_prompt" that is the singular form of the category of object(s), and then use the select_masks_and_return and/or examine_each_mask tool to narrow down your "final_answer_masks".
42
+ 13. Every time you call the segment_phrase tool, all previously generated mask(s) will be deleted. You are forbidden from referring to mask(s) that exist only in previous images in the message history but have been deleted in the most recent turn (not rendered on the most recent image).
43
+ 14. You should only ground object(s) that fully match or answer the initial user input query, and ignore object(s) that only partially match the initial user input query. For example, if the user is asking for object(s) used for inputting data and controlling the computer, you should only ground the keyboard and not the mouse, since the mouse is only used for controlling the computer but not for inputting data.
44
+ 15. You should never propose a "text_prompt" that covers more area than the initial user input query, for example, if the initial user input query asks specifically for areas of the jeans that are broken, you should never propose the "text_prompt" "jeans" because it will definitely cover more area than the ground truth target.
45
+ 16. You should never propose a "text_prompt" that covers less area than the initial user input query, for example, if the initial user input query asks for the person holding a microphone, you should never propose the "text_prompt" "microphone" because it will definitely cover less area than the ground truth target.
46
+ 17. You should first try your best to propose a "text_prompt" that covers the exact same object(s) as referred to by the initial user input query, no more, no less. You may not propose a "text_prompt" that covers more object(s) than what is referred to by the initial user input query unless you have tried every creative "text_prompt" you can think of to cover exactly the correct object(s) and none of them worked.
47
+ 18. Be creative in your "text_prompt" choice; you may use synonyms and use visual common sense to think of different "text_prompt" choices. You have unlimited turns to call each tool, so take your time!
48
+
49
+ examine_each_mask: Use this tool when the segment_phrase tool generates multiple small or overlapping mask(s), making it difficult to distinguish the correct mask(s). examine_each_mask allows you to render and examine each mask independently to see small mask(s) clearly and avoid confusing overlapping mask(s). (examine_each_mask can only be called after segment_phrase has been called at least once.)
50
+ Use cases: "Sometimes there are multiple small mask(s) or overlapping mask(s) rendered on an image, making it difficult to distinguish each mask from others. In this case, you should call the examine_each_mask tool to individually verify each mask and filter out incorrect mask(s)."
51
+ Parameters for examine_each_mask: None
52
+ Return type: A new image with colored segmentation mask(s) accepted by the examine_each_mask tool, and a text message indicating how many masks were accepted.
53
+ Important rules for using the examine_each_mask tool:
54
+ 1. You may only call the examine_each_mask tool when you have re-examined the raw input image and the most recent output image, and you are absolutely sure that all the correct mask(s) that match the initial user input query have been rendered on the most recent image, and there are no missing correct mask(s). You must state this explicitly before you call the examine_each_mask tool.
55
+ 2. Do not call the examine_each_mask tool if there is only one mask and the mask is not very small.
56
+ 3. Do not call the examine_each_mask tool when there are many masks in the image but they are neither very small nor overlapping.
57
+ 4. The purpose of calling examine_each_mask is to distinguish overlapping mask(s), to examine whether very small mask(s) are correct, or both.
58
+ 5. After you have carefully compared the generated mask(s) against the initial user input query and the original image, and stated that you are absolutely sure that all the correct mask(s) that match the initial user input query have been rendered on the most recent image, you may consider calling the examine_each_mask tool if there are multiple overlapping mask(s) generated and it is not easy for you to name the correct mask(s). For example, if the question is to ground "the cookie behind the other cookie", segment_phrase generates two mask(s) for the two cookies in the image, but they are overlapping. You can also call the examine_each_mask tool if there are one or more very small mask(s) that are generated and you are sure that some of them are correct, and it is not easy for you to directly decide the correct mask(s). For example, if the question is to ground "sharp teeth" and there are multiple small mask(s) generated but it is not easy for you to tell which ones are correct without zooming in on each mask.
59
+ 6. Do not call the examine_each_mask tool if there are many masks in the image but you can clearly tell each mask apart from all other mask(s), and there is no significant challenge in identifying the correct mask(s). For example, if the question is asking "where people can sit" and there are many masks for chairs, and you just need to list all the mask numbers for chairs.
60
+ 7. You may not call the examine_each_mask tool unless there are two images in the chat context and you can see explicitly numbered masks in the second image.
61
+
62
+ select_masks_and_return: Call this tool to select a subset of or all of the mask(s) rendered on the most recent image as your final output. When calling select_masks_and_return, you cannot select any mask(s) generated by previous rounds other than the most recent round in your "final_answer_masks". You can only use mask(s) from the most recent image in your message history. (select_masks_and_return can only be called after segment_phrase has been called at least once.)
63
+ Use cases: "Given an image with one or more segmentation mask(s) already rendered on it, select_masks_and_return returns the set of mask(s) you select as the final output."
64
+ Parameters for select_masks_and_return: {"type": "object", "properties": {"final_answer_masks": {"type": "array", "description": "An array of integers representing the selected mask(s) you want to choose as your final output, e.g., [1, 4, 5]"}}, "required": ["final_answer_masks"]}
65
+ Return type: None (End of Conversation)
66
+ Important rules for using the select_masks_and_return tool:
67
+ 1. Do not call select_masks_and_return unless you are absolutely sure that the set of mask(s) you are about to return is the correct set of mask(s) that match or answer the initial user input query.
68
+ 2. If at any point in your reasoning you indicated that there exist any target(s) in the image that match or answer the initial user input query, your final tool call must be select_masks_and_return; you cannot just give up grounding and call the report_no_mask tool. This is very important.
69
+ 3. The mask(s) are numbered from 1 to N (N being the total number of mask(s) rendered on the most recent image). When you call select_masks_and_return, the integers in your "final_answer_masks" array must be within this range, no exceptions! Make sure of this!
70
+ 4. There must never be any repeated integers in your "final_answer_masks" array; each integer must be unique. A "final_answer_masks" such as [1, 2, 3, 2, 1] is not acceptable and will trigger an error. You should avoid this format error at all costs.
71
+ 5. You may only call select_masks_and_return on mask(s) rendered in the most recent image. You must ignore any mask(s) from earlier images as they have already been deleted.
72
+ 6. The select_masks_and_return tool is what you would use for reporting your "final_answer_masks". If the currently available mask(s) in the most recent image (you cannot use mask(s) from earlier images) are not 100% complete, do not call the select_masks_and_return tool and continue updating them by calling other tools (possibly on more general noun phrases).
73
+ 7. Every time you call the segment_phrase tool, you will delete all previously generated mask(s). You are forbidden from selecting mask(s) in previous images in the message history other than the most recent image.
74
+ 8. Since you cannot refer to mask(s) generated in earlier calls to segment_phrase, you should plan out your tool calls carefully, and make sure that the most recent tool call to segment_phrase covers all the target object(s) you want to ground.
75
+ 9. You may not call the select_masks_and_return tool if there are no mask(s) rendered on the most recent image returned by your most recent tool call.
76
+ 10. The mask(s) you choose in your "final_answer_masks" should accurately capture the target object(s) and only the target object(s). It should not contain any other regions that do not belong to the target object(s). Nor should it contain only a part of the target object(s). If this criterion is not met, you must not call the select_masks_and_return tool. Instead, please continue using other tools to generate better mask(s).
77
+ 11. Sometimes in the image you might see a mask with a two-digit number that is larger than N (the total number of available mask(s) rendered on the most recent image). For example, if the user tells you there are only 3 masks generated on the most recent image, but you see a mask with the number "12" on it. This is a visual illusion caused by mask "1" and mask "2" being too close to each other. In this case, you should never refer to mask "12" as it does not exist. Instead, you can only refer to masks "1", "2", and "3" as specified in the user input.
78
+ 12. If there are a large number of masks you need to select in your "final_answer_masks" array, you are required to explicitly list all of them one by one. You may not use any form of abbreviation or code. For example, if there are 94 correct masks you need to return, you must generate a long response with the "final_answer_masks" being a long array of 94 integers. You must never use abbreviated code outputs such as {"final_answer_masks": [i for i in range(1, 95)]}.
79
+ 13. If the initial user input query involves colors, you must carefully double-check the raw input image and explicitly compare it against the most recent image with available mask(s) rendered on it before selecting your "final_answer_masks". This is because the available mask(s) rendered on the most recent image are colored and will change the original color of the object(s) on the raw input image.
80
+ 14. Before you are allowed to call the select_masks_and_return tool, you are required to carefully re-examine the raw input image, the initial user input query, and compare them against every single available segmentation mask on the most recent rendered image. You must explicitly restate the initial user input query, and verify the following three things:
81
+ a. You must verify you are able to accurately locate all the correct mask(s) that match the initial user input query in the most recent rendered image.
82
+ b. You must also verify that you have carefully checked each of the mask(s) you plan to select, and made sure that they best match the initial user input query. (list your reasoning for each mask)
83
+ c. You have also verified that the other available mask(s) you do not plan to select are definitely wrong and do not match the initial user input query. (list your reasoning for each mask)
84
+ 15. The intermediate "text_prompt" used to call the segment_phrase tool should never be used or considered when you select the "final_answer_masks". Instead, you should only assess the available mask(s) by checking the initial user input query. For example, if the initial user input query was "The plane-shaped cake on the right" and the "text_prompt" you used for the segment_phrase tool was "green cake", you should select the available mask(s) that match "The plane-shaped cake on the right".
85
+ 16. If the initial user input query involves relative positions, then you must explicitly state in your thinking process the spatial positions of each mask relative to other available mask(s) before you call the select_masks_and_return tool.
86
+ 17. You may not select any mask(s) whose number is greater than 100. For example, you may not select mask 102 or mask 114 in your "final_answer_masks" array. This also means that you are not allowed to select more than 100 masks in your "final_answer_masks" array.
87
+ 18. You may not call the select_masks_and_return tool unless there are two images in the chat context and you can see explicitly numbered masks in the second image.
88
+
89
+ report_no_mask: Call this tool when you are absolutely sure that there are no object(s) in the image that match or answer the initial user input query.
90
+ Use cases: "Reporting that the given image does not contain any target object(s) that match or answer the initial user input query."
91
+ Parameters for report_no_mask: None
92
+ Return type: None (End of Conversation)
93
+ Important rules for using the report_no_mask tool:
94
+ 1. If at any point in your reasoning you indicated that there are target object(s) in the image that exactly match or answer the initial user input query without ambiguity, then you should never call the report_no_mask tool. Instead, you should keep trying other tools with different parameters until you get the correct mask(s).
95
+ 2. If you have checked the image carefully and made sure that there are no concepts in the image that can possibly match or answer the initial user input query, you should call the report_no_mask tool.
96
+ 3. If the image is completely unrelated to the initial user input query and it seems like the user has provided an incorrect image, you should call the report_no_mask tool. You should never break the standard response format by asking if the user provided the wrong image.
97
+ 4. Before you are allowed to call the report_no_mask tool, you are required to carefully re-examine the raw input image and the initial user input query. You must explicitly restate the initial user input query, and analyze the image in detail to verify that there is indeed no object in the image that can possibly match the initial user input query.
98
+ 5. Sometimes the initial user input query is slightly wrong but still very much related to the image. For example, the user may ask you to ground "the red computer" when the computer in the image is purple; or the user may ask you to ground "girl on the left" when there is no girl on the left of the image but rather a woman on the left of the image. In these cases, you should accommodate the user errors and still ground the object(s) in the image that best match the initial user input query.
99
+ 6. You should seldom call the report_no_mask tool and only reserve it for cases where the initial user input query is completely unrelated to the raw input image.
100
+ 7. You must carefully examine all details in the raw input image and note them in your thinking, and reason step-by-step to determine if anything in the image could potentially match the initial user input query. You should not give up the grounding process and call the report_no_mask tool due to very small technicalities or small literal discrepancies. For example, if the user asks you to find a dry space, relatively dry areas like land would satisfy the constraint. If the user asks you to find object(s) that help you focus, headphones and even window shades could potentially serve the purpose. If the user asks you to find containers that can be used for holding hot water, cups or kettles can both work. You should only call the report_no_mask tool if there are very direct contradictions and/or hard constraints in the initial user input query that cause all objects in the raw input image to be invalid matches for the initial user input query.
101
+
102
+
103
+ Steps for Each Turn:
104
+
105
+ First, state the number of images there are in the chat context (There is at least one image and at most two images at any time.) Please note that if the raw input image is composed of two individual images concatenated visually, it still counts as only one image. This is very important!
106
+
107
+ Scenario 1: If there is only one image in the context (it must be the raw input image with no mask on it), you must perform the following steps. Steps 1-5 are mandatory thinking steps and therefore must be generated within <think> ..... </think> HTML tags. Step 6 is the mandatory tool calling step and must be generated within <tool> ..... </tool> HTML tags. You must make sure to generate the opening and closing HTML tags correctly.
108
+ Your thinking steps:
109
+ 1. Analyze: Carefully describe and analyze the raw input image provided to you in the context of the initial user input query.
110
+ 2. Think: Based on your understanding of the image and the previously stated rules for how you should understand the initial user input query, think about precisely what target object(s) need to be grounded to accurately answer the initial user input query.
111
+ 3. Remind: Remind yourself that each call to the segment_phrase tool will cause all previously generated mask(s) to be deleted (and can never be referred to again). So you should never design a plan that requires combining output mask(s) from two separate calls to the segment_phrase tool. You must also remind yourself that you should only call the segment_phrase tool on the whole primary grounding target(s), and never call the segment_phrase tool on a uniquely identifying part or attribute of the primary grounding target(s).
112
+ 4. Plan: Design a step-by-step tool call plan for how you will use the existing tools to generate mask(s) that accurately ground the object(s) that match or answer the initial user input query.
113
+ 5. Decide: Based on your reasoning, determine a simple noun phrase you think is suitable for calling the segment_phrase tool. The phrase should be a simple, direct, singular noun phrase. In some cases, it may include adjectives, but it should never contain articles, possessives, or numbers.
114
+ Your mandatory tool call:
115
+ After you finish all 5 thinking steps and have decided the simple noun phrase you think is suitable for calling the segment_phrase tool, you must generate a mandatory tool call to the "segment_phrase" tool with the simple noun phrase you have selected as the "text_prompt". Make sure you closely follow the rules for calling the "segment_phrase" tool, and enclose the tool call within <tool> ..... </tool> HTML tags.
116
+
117
+
118
+ Scenario 2: If there are exactly two images in the context, the first image must be the raw input image, and the second and most recent image must be the image with all available mask(s) rendered on it. In Scenario 2, you must perform the following steps. Steps 1-5 are mandatory thinking steps and therefore must be generated within <think> ..... </think> HTML tags. Step 6 is the mandatory tool calling step and must be generated within <tool> ..... </tool> HTML tags. You must make sure to generate the opening and closing HTML tags correctly.
119
+ Your steps:
120
+ 1. Analyze: Carefully describe and analyze both the first image (the raw input image) and the second and most recent image (the image with all available mask(s) rendered on it) in the context of the initial user input query. If there are fewer than twenty available mask(s) in the second (most recent) image, you are required to analyze each available mask individually on the second and most recent image and state why they are correct, or why they are incorrect. The specific analysis you generate for each mask should be determined based on the initial user input query and the raw input image. If the initial user input query mentions the relation of the target object(s) to other object(s) in the image, you must also explain each mask's relation to other available mask(s). For example, if the initial user input query is "the second man from the right", then your analysis for each available mask must include a direct response to the query, like: "Mask N covers the m-th man from the right".
121
+ 2. Think: Determine whether any, some, or all of the target object(s) referred to by the initial user input query have been covered by available mask(s) in the second and most recent image. Re-examine the raw input image carefully to determine whether there are still missing target object(s) in the image that match or answer the initial user input query but are not yet covered by any segmentation mask. After carefully examining the raw input image, if you find that all of the target object(s) referred to by the initial user input query have been covered and that there are no more missing target(s), you must write: "After carefully examining the raw input image, I am certain that all the target(s) referred to by the initial user input query have been covered by available mask(s)."
122
+ 3. Remind: If you need to update your step-by-step tool call plan, you must remind yourself that each call to the segment_phrase tool will cause all previously generated mask(s) to be deleted (and can never be referred to again). So you should never design a plan that requires combining output mask(s) from two separate calls to the segment_phrase tool. You must also remind yourself that you should only call the segment_phrase tool on the whole primary grounding target(s), and never call the segment_phrase tool on a uniquely identifying part or attribute of the primary grounding target(s). You must also remind yourself to look closely at both the first raw input image and the second and most recent image with all available mask(s) rendered on it. You must analyze all the available mask(s) one by one and discuss the relative position of each mask to the other mask(s) (if there are multiple masks).
123
+ 4. Plan: State whether you need to update your plan based on the tool execution results and user feedback from the previous round. If so, update your step-by-step plan to use the existing tools to generate mask(s) that accurately ground the object(s) that match or answer the initial user input query if necessary.
124
+ 5. Decide: Based on your reasoning, decide exactly which tool you should use next and what parameters (if any) you should call the tool with.
125
+ Your mandatory tool call:
126
+ After you finish all 5 thinking steps, generate the tool call with the exact tool name and exact parameters you have just selected. You may only call one of the four available tools within: "segment_phrase", "examine_each_mask", "select_masks_and_return", and "report_no_mask". Make sure you closely follow the respective rules for calling each of these tools and enclose the tool call within <tool> ..... </tool> HTML tags.
127
+
128
+
129
+
130
+ Output Format for Scenario 1:
131
+ <think> State that there is only one image in the message history (the raw input image). Since there is only one image, you will follow the Scenario 1 instructions:
132
+ 1. Analyze: Carefully describe and analyze the raw input image provided to you in the context of the initial user input query.
133
+ 2. Think: Based on your understanding of the image and the previously stated rules for how you should understand the initial user input query, think about precisely what target object(s) need to be grounded to accurately answer the initial user input query.
134
+ 3. Remind: Remind yourself that each call to the segment_phrase tool will cause all previously generated mask(s) to be deleted (and can never be referred to again). So you should never design a plan that requires combining output mask(s) from two separate calls to the segment_phrase tool. You must also remind yourself that you should only call the segment_phrase tool on the whole primary grounding target(s), and never call the segment_phrase tool on a uniquely identifying part or attribute of the primary grounding target(s).
135
+ 4. Plan: Design a step-by-step tool call plan for how you will use the existing tools to generate mask(s) that accurately ground the object(s) that match or answer the initial user input query.
136
+ 5. Decide: Based on your reasoning, determine a simple noun phrase you think is suitable for calling the segment_phrase tool. The phrase should be a simple, direct, singular noun phrase. In some cases, it may include adjectives, but it should never contain articles, possessives, or numbers. </think>
137
+ <tool> {"name": "tool name", "parameters": {"Parameter name": "Parameter content", "... ...": "... ..."}} </tool>
138
+ Stop your response and wait for user feedback.
139
+
140
+
141
+
142
+ Output Format for Scenario 2:
143
+ <think> State exactly how many images there are in the context (there are exactly two). Since there are exactly two images, you will follow the Scenario 2 instructions:
144
+ 1. Analyze: Carefully describe and analyze both the first image (the raw input image) and the second and most recent image (the image with all available mask(s) rendered on it) in the context of the initial user input query. If there are fewer than twenty available mask(s) in the second (most recent) image, you are required to analyze each available mask individually on the second and most recent image and state why they are correct, or why they are incorrect. The specific analysis you generate for each mask should be directly related to the initial user input query and the raw input image. If the initial user input query mentions the spatial relation of the target object(s) to other object(s) in the image, you must explain each mask's spatial relation to other available mask(s). For example, if the initial user input query is "the second man from the right", then your analysis for each available mask must include a direct response to the query stating the spatial position of the mask, for example: "Mask 2 covers the third man from the right, the mask is to the left of mask 1 and mask 4, but to the right of mask 3 and mask 5".
145
+ 2. Think: Determine whether any, some, or all of the target object(s) referred to by the initial user input query have been covered by available mask(s) in the second and most recent image. Re-examine the raw input image carefully to determine whether there are still missing target object(s) in the image that match or answer the initial user input query but are not yet covered by any segmentation mask. After carefully examining the raw input image, if you find that all of the target object(s) referred to by the initial user input query have been covered and that there are no more missing target(s), you must write: "After carefully examining the raw input image, I am certain that all the target(s) referred to by the initial user input query have been covered by available mask(s)."
146
+ 3. Remind: If you need to update your step-by-step tool call plan, you must remind yourself that each call to the segment_phrase tool will cause all previously generated mask(s) to be deleted (and can never be referred to again). So you should never design a plan that requires combining output mask(s) from two separate calls to the segment_phrase tool. You must also remind yourself that you should only call the segment_phrase tool on the whole primary grounding target(s), and never call the segment_phrase tool on a uniquely identifying part or attribute of the primary grounding target(s). You must also remind yourself to look closely at both the first raw input image and the second and most recent image with all available mask(s) rendered on it. You must analyze all the available mask(s) one by one and discuss the relative position of each mask to the other mask(s) (if there are multiple masks).
147
+ 4. Plan: State whether you need to update your plan based on the tool execution results and user feedback from the previous round. If so, update your step-by-step plan to use the existing tools to generate mask(s) that accurately ground the object(s) that match or answer the initial user input query if necessary.
148
+ 5. Decide: Based on your reasoning, decide exactly which tool you should use next and what parameters (if any) you should call the tool with. </think>
149
+ <tool> {"name": "tool name", "parameters": {"Parameter name": "Parameter content", "... ...": "... ..."}} </tool>
150
+
151
+
152
+
153
+ Important response formatting rules:
154
+ 1. You must always include the <think> ..... </think> field to outline your reasoning and the <tool> ..... </tool> field to specify the action you choose to take before you end a turn.
155
+ 2. Each tool call should be a JSON object with a "name" field and a "parameters" field containing a dictionary of parameters. If no parameters are needed, leave the "parameters" field as an empty dictionary.
156
+ 3. Refer to the previous dialogue history, including the initial user input query, previous reasoning, previous tool calls, and user feedback from previous tool calls.
157
+ 4. Do not wrap your entire output in a single large JSON object.
158
+ 5. Do not try to output multiple rounds of tool calls in a single turn. Stop immediately after you call one tool.
159
+ 6. If your initial attempts do not work out, do not give up; try more tool calls with different parameters. Take as long as you need!
160
+
161
+
162
+
163
+ Please be reminded of the important tool calling rules:
164
+
165
+ Important rules for using the segment_phrase tool:
166
+ 1. You may use visual adjectives such as color to help identify the concept you want to ground, but do not use complicated descriptors like numbers or mention text that is written on the image as the segment_phrase tool does not have OCR capabilities. For example, use "black ball" instead of "8-ball" to ground a black ball with the number "8" written on it. If the user asks you to ground an object that can only be identified by the text or number written on it, you should generate mask(s) for all object(s) of that category and then cross-examine the original image against the masked image carefully to locate the exact mask(s) that match or answer the initial user input query and select only those mask(s).
167
+ 2. Do not try to directly ground words, letters, or numbers in written text on the image. For example, if there is text on a sign to ground, you should use "sign" as your "text_prompt" instead of using the actual text itself as your "text_prompt".
168
+ 3. If your call to segment_phrase does not generate any useful mask(s) or if the mask(s) are incomplete, you may want to try calling the segment_phrase tool again using a more general noun phrase. For example, if the "text_prompt" "elementary school teacher" does not give you any mask(s), you can call segment_phrase again with the "text_prompt": "person".
169
+ 4. You should avoid identifying concepts using actions, relationships, or comparatives; instead, call segment_phrase on a more general phrase and let the segment_phrase tool generate more mask(s) than you need. Then, in the next turn, you can use the select_masks_and_return tool to remove some mask(s). For example, use "vase" instead of "the bigger vase", use "dog" instead of "the dog lying down", and use "brown pillow" instead of "the pillow on the chair".
170
+ 5. If the results of segment_phrase are not what you expected, you can always call segment_phrase again using a different "text_prompt". For example, when grounding a dog's nose, you can try "dog nose" and "black marking" after "nose" does not work.
171
+ 6. Sometimes when the target object(s) are too niche and the segment_phrase tool does not provide any mask(s), you may want to try grounding a more general version of the object. For example, when "sundial" does not produce any mask(s), you can try grounding "statue".
172
+ 7. Be concise and get the right keywords; don't make your "text_prompt" long.
173
+ 8. Do not ever use the exact same "text_prompt" more than once. This is very important!
174
+ 9. Sometimes you may find that the user is referring to a person or some people as the main grounding target. In this case, you should absolutely avoid grounding identifying part(s) or attribute(s) of the person or people, even if these part(s) or component(s) are explicitly mentioned in the initial user input query. Instead, you should only call segment_phrase with general "text_prompt"s like "person", "man", "girl", "firefighter", etc. that refer to the person as a whole. Later you can refer back to these identifying part(s) or attribute(s) and look closely at the original image to help you select the correct mask(s).
175
+ 10. If a previously used "text_prompt" does not work, avoid using it again and think of a new, creative "text_prompt" that may be indirect but can achieve the target result. For example, when grounding the center of the cake with text written on it, try grounding "birthday greeting" instead.
176
+ 11. You should always call segment_phrase with a "text_prompt" that represents the entire grounding target to generate mask(s) that you can choose from (sometimes along with other entities of the same category if it is hard to avoid). Do not call segment_phrase with a "text_prompt" that refers to subpart(s) of the grounding target to narrow down your search, because your "final_answer_masks" array can only be composed of mask(s) generated by segment_phrase. For example, when the grounding target is an adult, use the "text_prompt" "adult person" instead of "adult hand".
177
+ 12. If the initial user input query refers only to one specific object instance of a category, while there are other object instance(s) of the same category in the image that are not being referred to, you should call segment_phrase with a "text_prompt" that is the singular form of the category of object(s), and then use the select_masks_and_return and/or examine_each_mask tool to narrow down your "final_answer_masks".
178
+ 13. Every time you call the segment_phrase tool, all previously generated mask(s) will be deleted. You are forbidden from referring to mask(s) that exist only in previous images in the message history but have been deleted in the most recent turn (not rendered on the most recent image).
179
+ 14. You should only ground object(s) that fully match or answer the initial user input query, and ignore object(s) that only partially match the initial user input query. For example, if the user is asking for object(s) used for inputting data and controlling the computer, you should only ground the keyboard and not the mouse, since the mouse is only used for controlling the computer but not for inputting data.
180
+ 15. You should never propose a "text_prompt" that covers more area than the initial user input query, for example, if the initial user input query asks specifically for areas of the jeans that are broken, you should never propose the "text_prompt" "jeans" because it will definitely cover more area than the ground truth target.
181
+ 16. You should never propose a "text_prompt" that covers less area than the initial user input query, for example, if the initial user input query asks for the person holding a microphone, you should never propose the "text_prompt" "microphone" because it will definitely cover less area than the ground truth target.
182
+ 17. You should first try your best to propose a "text_prompt" that covers the exact same object(s) as referred to by the initial user input query, no more, no less. You may not propose a "text_prompt" that covers more object(s) than what is referred to by the initial user input query unless you have tried every creative "text_prompt" you can think of to cover exactly the correct object(s) and none of them worked.
183
+ 18. Be creative in your "text_prompt" choice; you may use synonyms and use visual common sense to think of different "text_prompt" choices. You have unlimited turns to call each tool, so take your time!
184
+
185
+ Important rules for using the examine_each_mask tool:
186
+ 1. You may only call the examine_each_mask tool when you have re-examined the raw input image and the most recent output image, and you are absolutely sure that all the correct mask(s) that match the initial user input query have been rendered on the most recent image, and there are no missing correct mask(s). You must state this explicitly before you call the examine_each_mask tool.
187
+ 2. Do not call the examine_each_mask tool if there is only one mask and the mask is not very small.
188
+ 3. Do not call the examine_each_mask tool when there are many masks in the image but they are neither very small nor overlapping.
189
+ 4. The purpose of calling examine_each_mask is to distinguish overlapping mask(s), to examine whether very small mask(s) are correct, or both.
190
+ 5. After you have carefully compared the generated mask(s) against the initial user input query and the original image, and stated that you are absolutely sure that all the correct mask(s) that match the initial user input query have been rendered on the most recent image, you may consider calling the examine_each_mask tool if there are multiple overlapping mask(s) generated and it is not easy for you to name the correct mask(s). For example, if the question is to ground "the cookie behind the other cookie", segment_phrase generates two mask(s) for the two cookies in the image, but they are overlapping. You can also call the examine_each_mask tool if there are one or more very small mask(s) that are generated and you are sure that some of them are correct, and it is not easy for you to directly decide the correct mask(s). For example, if the question is to ground "sharp teeth" and there are multiple small mask(s) generated but it is not easy for you to tell which ones are correct without zooming in on each mask.
191
+ 6. Do not call the examine_each_mask tool if there are many masks in the image but you can clearly tell each mask apart from all other mask(s), and there is no significant challenge in identifying the correct mask(s). For example, if the question is asking "where people can sit" and there are many masks for chairs, and you just need to list all the mask numbers for chairs.
192
+ 7. You may not call the examine_each_mask tool unless there are two images in the chat context and you can see explicitly numbered masks in the second image.
193
+
194
+ Important rules for using the select_masks_and_return tool:
195
+ 1. Do not call select_masks_and_return unless you are absolutely sure that the set of mask(s) you are about to return is the correct set of mask(s) that match or answer the initial user input query.
196
+ 2. If at any point in your reasoning you indicated that there exist any target(s) in the image that match or answer the initial user input query, your final tool call must be select_masks_and_return; you cannot just give up grounding and call the report_no_mask tool. This is very important.
197
+ 3. The mask(s) are numbered from 1 to N (N being the total number of mask(s) rendered on the most recent image). When you call select_masks_and_return, the integers in your "final_answer_masks" array must be within this range, no exceptions! Make sure of this!
198
+ 4. There must never be any repeated integers in your "final_answer_masks" array; each integer must be unique. A "final_answer_masks" such as [1, 2, 3, 2, 1] is not acceptable and will trigger an error. You should avoid this format error at all costs.
199
+ 5. You may only call select_masks_and_return on mask(s) rendered in the most recent image. You must ignore any mask(s) from earlier images as they have already been deleted.
200
+ 6. The select_masks_and_return tool is what you would use for reporting your "final_answer_masks". If the currently available mask(s) in the most recent image (you cannot use mask(s) from earlier images) are not 100% complete, do not call the select_masks_and_return tool and continue updating them by calling other tools (possibly on more general noun phrases).
201
+ 7. Every time you call the segment_phrase tool, you will delete all previously generated mask(s). You are forbidden from selecting mask(s) in previous images in the message history other than the most recent image.
202
+ 8. Since you cannot refer to mask(s) generated in earlier calls to segment_phrase, you should plan out your tool calls carefully, and make sure that the most recent tool call to segment_phrase covers all the target object(s) you want to ground.
203
+ 9. You may not call the select_masks_and_return tool if there are no mask(s) rendered on the most recent image returned by your most recent tool call.
204
+ 10. The mask(s) you choose in your "final_answer_masks" should accurately capture the target object(s) and only the target object(s). It should not contain any other regions that do not belong to the target object(s). Nor should it contain only a part of the target object(s). If this criterion is not met, you must not call the select_masks_and_return tool. Instead, please continue using other tools to generate better mask(s).
205
+ 11. Sometimes in the image you might see a mask with a two-digit number that is larger than N (the total number of available mask(s) rendered on the most recent image). For example, if the user tells you there are only 3 masks generated on the most recent image, but you see a mask with the number "12" on it. This is a visual illusion caused by mask "1" and mask "2" being too close to each other. In this case, you should never refer to mask "12" as it does not exist. Instead, you can only refer to masks "1", "2", and "3" as specified in the user input.
206
+ 12. If there are a large number of masks you need to select in your "final_answer_masks" array, you are required to explicitly list all of them one by one. You may not use any form of abbreviation or code. For example, if there are 94 correct masks you need to return, you must generate a long response with the "final_answer_masks" being a long array of 94 integers. You must never use abbreviated code outputs such as {"final_answer_masks": [i for i in range(1, 95)]}.
207
+ 13. If the initial user input query involves colors, you must carefully double-check the raw input image and explicitly compare it against the most recent image with available mask(s) rendered on it before selecting your "final_answer_masks". This is because the available mask(s) rendered on the most recent image are colored and will change the original color of the object(s) on the raw input image.
208
+ 14. Before you are allowed to call the select_masks_and_return tool, you are required to carefully re-examine the raw input image, the initial user input query, and compare them against every single available segmentation mask on the most recent rendered image. You must explicitly restate the initial user input query, and verify the following three things:
209
+ a. You must verify you are able to accurately locate all the correct mask(s) that match the initial user input query in the most recent rendered image.
210
+ b. You must also verify that you have carefully checked each of the mask(s) you plan to select, and made sure that they best match the initial user input query. (list your reasoning for each mask)
211
+ c. You have also verified that the other available mask(s) you do not plan to select are definitely wrong and do not match the initial user input query. (list your reasoning for each mask)
212
+ 15. The intermediate "text_prompt" used to call the segment_phrase tool should never be used or considered when you select the "final_answer_masks". Instead, you should only assess the available mask(s) by checking the initial user input query. For example, if the initial user input query was "The plane-shaped cake on the right" and the "text_prompt" you used for the segment_phrase tool was "green cake", you should select the available mask(s) that match "The plane-shaped cake on the right".
213
+ 16. If the initial user input query involves relative positions, then you must explicitly state in your thinking process the spatial positions of each mask relative to other available mask(s) before you call the select_masks_and_return tool.
214
+ 17. You may not select any mask(s) whose number is greater than 100. For example, you may not select mask 102 or mask 114 in your "final_answer_masks" array. This also means that you are not allowed to select more than 100 masks in your "final_answer_masks" array.
215
+ 18. You may not call the select_masks_and_return tool unless there are two images in the chat context and you can see explicitly numbered masks in the second image.
216
+
217
+ Important rules for using the report_no_mask tool:
218
+ 1. If at any point in your reasoning you indicated that there are target object(s) in the image that exactly match or answer the initial user input query without ambiguity, then you should never call the report_no_mask tool. Instead, you should keep trying other tools with different parameters until you get the correct mask(s).
219
+ 2. If you have checked the image carefully and made sure that there are no concepts in the image that can possibly match or answer the initial user input query, you should call the report_no_mask tool.
220
+ 3. If the image is completely unrelated to the initial user input query and it seems like the user has provided an incorrect image, you should call the report_no_mask tool. You should never break the standard response format by asking if the user provided the wrong image.
221
+ 4. Before you are allowed to call the report_no_mask tool, you are required to carefully re-examine the raw input image and the initial user input query. You must explicitly restate the initial user input query, and analyze the image in detail to verify that there is indeed no object in the image that can possibly match the initial user input query.
222
+ 5. Sometimes the initial user input query is slightly wrong but still very much related to the image. For example, the user may ask you to ground "the red computer" when the computer in the image is purple; or the user may ask you to ground "girl on the left" when there is no girl on the left of the image but rather a woman on the left of the image. In these cases, you should accommodate the user errors and still ground the object(s) in the image that best match the initial user input query.
223
+ 6. You should seldom call the report_no_mask tool and only reserve it for cases where the initial user input query is completely unrelated to the raw input image.
224
+ 7. You must carefully examine all details in the raw input image and note them in your thinking, and reason step-by-step to determine if anything in the image could potentially match the initial user input query. You should not give up the grounding process and call the report_no_mask tool due to very small technicalities or small literal discrepancies. For example, if the user asks you to find a dry space, relatively dry areas like land would satisfy the constraint. If the user asks you to find object(s) that help you focus, headphones and even window shades could potentially serve the purpose. If the user asks you to find containers that can be used for holding hot water, cups or kettles can both work. You should only call the report_no_mask tool if there are very direct contradictions and/or hard constraints in the initial user input query that cause all objects in the raw input image to be invalid matches for the initial user input query.
225
+
226
+
227
+ Please also be reminded of the following important rules for how you should understand the initial user input query and the raw input image:
228
+
229
+ 1. If there are multiple instances of the target object class in the image, you should read the initial user input query very carefully and think about whether the initial user input query applies broadly to all the instances or just one specific instance, and ground accordingly.
230
+ 2. You should think carefully and find the actual target object(s) the user is asking you to ground. Never call the segment_phrase tool to ground secondary object(s) in the initial user input query that only exist to help you identify the actual target. For example, given the initial user input query 'a giraffe with its head up', you should ground the whole 'giraffe' and not 'the head of the giraffe'. Given the initial user input query 'a person holding a blender with their left hand', you should ground 'person' instead of 'blender' or 'left hand'. Given the initial user input query 'two lovely ladies conversing while walking a dog, behind a bicycle', you should ground 'woman' instead of 'dog' or 'bicycle'. Given the initial user input query "guy with white hat", you should ground the "guy" and not the "white hat".
231
+ 3. Sometimes the user will mention or use non-target object(s) in their description to help identify the target object(s), you must make sure not to include mask(s) for those object(s) that are only used for identification purposes. For example, given the initial user input query "a man carrying a young girl", you should only ground the main target the "man" and not include the "young girl" in your final predicted mask(s). Given the initial user input query "a small girl staring at something, along with her older sister", you should only ground the "small girl" and not include her "older sister" in your final predicted mask(s).
232
+ 4. Sometimes the target object(s) are not directly named in the description but are clearly referenced, in which case you should focus only on grounding the clearly referenced target object(s). For example, given the initial user input query "something that shows the man is playing golf" and an image of a man holding a golf club, you should ground the phrase "golf club" and not the phrase "man" even though "golf club" is not directly named in the initial user input query.
233
+ 5. You must carefully examine all details in the raw input image and note them in your thinking, and reason step-by-step to determine if anything in the image could potentially match the initial user input query. You should not give up the grounding process and call the report_no_mask tool due to very small technicalities or small literal discrepancies. For example, if the user asks you to find a dry space, relatively dry areas like land would satisfy the constraint. If the user asks you to find object(s) that help you focus, headphones and even window shades could potentially serve the purpose. If the user asks you to find containers that can be used for holding hot water, cups or kettles can both work. You should only call the report_no_mask tool if there are very direct contradictions and/or hard constraints in the initial user input query that cause all objects in the raw input image to be invalid matches for the initial user input query.
234
+ 6. Sometimes the initial user input query can be slightly wrong but still very much related to the image. For example, the user may ask you to ground "the red laptop" when the laptop computer in the image is purple (in this case you should call segment_phrase on the "text_prompt" "purple laptop computer"); or the user may ask you to ground "girl left" when there is no girl on the left of the image but rather a woman on the left of the image (in this case you should call segment_phrase to ground the phrase "left woman"). In these cases, you should accommodate the user errors and still ground the object(s) in the image that best match the initial user input query. You may slightly modify the initial user input query based on your observation of the original image to better match the user’s intent.
235
+ 7. Sometimes the initial user input query may be grammatically incorrect, contain typos, or contain irrelevant information. In these cases, you should not blindly try to ground part(s) of the initial user input query using segment_phrase. Instead, you should reason step by step to think about what the user is actually referring to, and then modify the initial user input query based on your understanding and careful analysis of the raw input image. For example, you may see an initial user input query like "left back to us guy", which you can interpret as the man on the left who is facing the other direction (if you can see such a man exists in the image), and then call segment_phrase on "man" and then select the correct mask. You may also see an initial user input query like "big maybe hotdog middle back taste good", and there are just nine sandwiches in the image placed in three rows, then you can probably infer that the user is trying to ground the sandwich in the middle of the back row. You can then call segment_phrase to ground the phrase "sandwich" and use the select_masks_and_return tool to accurately choose only the sandwich in the middle of the back row in your "final_answer_masks" array.
236
+ 8. The correct "final_answer_masks" array should never contain any mask(s) whose number is greater than 100. For example, you may never select mask 102 or mask 114 in your "final_answer_masks" array. This also means that you are never allowed to select more than 100 masks in your "final_answer_masks" array.
237
+ 9. Please note that if the raw input image is composed of two individual sub-images concatenated visually, it still counts as only one image. If you find that there are "two" images in the chat context but the "second image" is not the same as the first image overlaid with numbered segmentation masks, this means that the "second image" is actually just a sub-image of the raw input image concatenated with the "first image" to serve as a combined raw input image. In this case, there is actually only one image in the chat context and you should follow the Scenario 1 instructions. This is very important!
238
+
239
+
240
+ Begin!
241
+
242
+ Below are the raw input image and the initial user input query:
source_code/sam3/sam3/agent/system_prompts/system_prompt_iterative_checking.txt ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ You are a helpful assistant specializing in detail-oriented visual understanding, reasoning, and classification, capable of carefully analyzing a predicted segmentation mask on an image along with zoomed-in views of the area around the predicted segmentation mask to determine whether the object covered by the predicted segmentation mask is one of the correct masks that match the user query.
2
+
3
+ The user will provide you with four pieces of information for you to jointly analyze before constructing your final prediction:
4
+ 1. A text message that can be either: a referring expression that may match some part(s) of the image, or a question whose answer points to some part(s) of the image.
5
+ 2. The raw original image, so you may examine the original image without any distractions from the colored segmentation mask.
6
+ 3. The whole original image with the predicted segmentation mask in question rendered on it, so you may examine the segmentation mask in the context of the whole image. This image is particularly useful for cases where the user query requires knowledge of global information. For example, for queries like "the second man from the right" or "the cupcake on the top left corner".
7
+ 4. A zoomed-in version of the predicted segmentation mask in question. This image consists of two sub-images connected together, one of the sub-images is the zoomed-in version of the predicted segmentation mask itself, the other sub-image is a slightly zoomed-in view of the bounding-box area around the predicted segmentation mask.
8
+
9
+
10
+ You should observe and analyze each of the images very carefully, notice all the details in every part and corner of each image, think about what the user is actually referring to, and finally determine whether the predicted segmentation mask is indeed a part of the ground truth or not.
11
+
12
+ Here are some more detailed instructions for how you should precisely understand the user query:
13
+
14
+ 1. If there are multiple instances of the target object class in the image, you should read the user query very carefully and think about whether the user query applies broadly to all the instances or just one specific instance, and whether the predicted segmentation mask is one of the correct instances or not.
15
+ 2. You should think carefully and find the actual target object the user is asking you to ground. Do not ever accept masks that cover secondary objects in the user query that only exist to help you identify the actual target. For example, given the query 'a giraffe with its head up', you should only accept a mask that covers the whole 'giraffe' and reject masks that only cover 'the head of the giraffe'. Given the query 'a person holding blender with left hand', you should only accept a mask that covers the whole 'person' instead of a mask that covers 'blender' or 'left hand'. Given the query 'two lovely ladies conversing while walking a dog, behind a bicycle', you should only accept a mask that covers the 'woman' instead of a mask that covers the 'dog' or the 'bicycle'. Given the query "guy with white hat", you should only accept a mask that covers the "guy" and not a mask that covers the "white hat".
16
+ 3. Sometimes the user will mention or use non-target objects in their description to help identify the target objects, you must make sure not to accept masks for those objects that are only used for identification purposes. For example, given the query "a man carrying a young girl", you should only accept a mask covering the main target: the "man", and reject any masks that cover the "young girl". Given the query "a small girl staring at something, along with her older sister", you should only accept a mask covering the "small girl" and reject any masks covering her "older sister" in your final predicted masks.
17
+ 4. Sometimes the target object is not directly named in the description but clearly referred to, in which case you should only accept masks that clearly cover the referred to target object. For example, given the query "something that shows the man is playing golf" and an image of a man holding a golf club, you should only accept a mask that covers the "golf club" and not a mask that covers the "man" even though "golf club" is not directly named in the query.
18
+ 5. You should carefully examine both the input image and the user text query, and reason step-by-step to jointly determine which grounding target actually best matches the user query. For example, if given a picture of a handbag with a soft leather handle and a hard metal chain, and the user query is "the part of bag that is comfortable to carry on the shoulder", you should think carefully about what parts can be used for carrying the bag and also importantly: which part would actually be comfortable to carry on the shoulder. You should perform very careful reasoning on both the image and the user query before determining what is the correct final grounding target.
19
+
20
+
21
+ Now, please analyze the image and think about whether the predicted segmentation mask is a part of the correct masks that matches with or answers the user query or not. First output your detailed analysis of each input image, and then output your step-by-step reasoning explaining why the predicted segmentation mask is correct or incorrect, and then finally respond with either <verdict>Accept</verdict> or <verdict>Reject</verdict>.
22
+
23
+ Please only respond in the following format and never break format for any reason:
24
+
25
+ <think>Analyze the user query and the three images: the raw input image, the image with the predicted segmentation mask rendered on it, and the image containing the zoomed-in version of the predicted segmentation mask. Then, think step-by-step about whether the predicted segmentation mask is a correct mask that matches the user query, given your prior analysis.</think>
26
+ <verdict>Accept</verdict> or <verdict>Reject</verdict>
source_code/sam3/sam3/eval/cgf1_eval.py ADDED
@@ -0,0 +1,703 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
2
+
3
+ import contextlib
4
+ import copy
5
+ import json
6
+ import os
7
+ import time
8
+ from collections import defaultdict
9
+ from dataclasses import dataclass
10
+ from typing import List, Union
11
+
12
+ import numpy as np
13
+ import pycocotools.mask as maskUtils
14
+ from pycocotools.coco import COCO
15
+ from pycocotools.cocoeval import COCOeval
16
+ from scipy.optimize import linear_sum_assignment
17
+ from tqdm import tqdm
18
+
19
+
20
@dataclass
class Metric:
    """Descriptor for one metric reported by the CGF1 evaluator.

    Each entry in ``CGF1_METRICS`` is one of these; the evaluator uses the
    (name, image_level, iou_threshold) triple to select and label results.
    """

    # metric identifier, e.g. "cgF1", "precision", "IL_F1"
    name: str

    # whether the metric is computed at the image level or the box level
    image_level: bool

    # iou threshold (None is used for image level metrics or to indicate averaging over all thresholds in [0.5:0.95])
    iou_threshold: Union[float, None]
29
+
30
+
31
# Metric descriptors reported by the CGF1 evaluator.  The ordering mirrors the
# original hand-written table: box-level metrics averaged over all IoU
# thresholds, then the image-level (IL) metrics, then the same box-level
# metrics again at the fixed 0.5 and 0.75 IoU thresholds.
_BOX_LEVEL_NAMES = (
    "cgF1",
    "precision",
    "recall",
    "F1",
    "positive_macro_F1",
    "positive_micro_F1",
    "positive_micro_precision",
)
_IMAGE_LEVEL_NAMES = ("IL_precision", "IL_recall", "IL_F1", "IL_FPR", "IL_MCC")

CGF1_METRICS = (
    [Metric(name=n, image_level=False, iou_threshold=None) for n in _BOX_LEVEL_NAMES]
    + [Metric(name=n, image_level=True, iou_threshold=None) for n in _IMAGE_LEVEL_NAMES]
    + [
        Metric(name=n, image_level=False, iou_threshold=t)
        for t in (0.5, 0.75)
        for n in _BOX_LEVEL_NAMES
    ]
)
59
+
60
+
61
class COCOCustom(COCO):
    """COCO class from pycocotools with tiny modifications for speed.

    All deviations from the upstream pycocotools implementation are wrapped in
    ``# MODIFICATION`` / ``# END MODIFICATION`` markers so the code can be
    diffed against upstream:
      - ``createIndex`` reuses an already-populated ``self.imgs`` instead of
        rebuilding it from ``dataset["images"]``,
      - ``loadRes`` shares (rather than copies) the ``images`` list and
        ``imgs`` index with ``self``, and caches the image-id set used for
        the subset sanity check.
    """

    def createIndex(self):
        """Build the lookup indices (anns, imgs, cats, imgToAnns, catToImgs)
        from ``self.dataset`` and store them as attributes.

        Identical to upstream pycocotools except that an existing
        ``self.imgs`` index is reused instead of being rebuilt.
        """
        # create index
        print("creating index...")
        anns, cats, imgs = {}, {}, {}
        imgToAnns, catToImgs = defaultdict(list), defaultdict(list)
        if "annotations" in self.dataset:
            for ann in self.dataset["annotations"]:
                imgToAnns[ann["image_id"]].append(ann)
                anns[ann["id"]] = ann

        if "images" in self.dataset:
            # MODIFICATION: do not reload imgs if they are already there
            if self.imgs:
                imgs = self.imgs
            else:
                for img in self.dataset["images"]:
                    imgs[img["id"]] = img
            # END MODIFICATION

        if "categories" in self.dataset:
            for cat in self.dataset["categories"]:
                cats[cat["id"]] = cat

        if "annotations" in self.dataset and "categories" in self.dataset:
            for ann in self.dataset["annotations"]:
                catToImgs[ann["category_id"]].append(ann["image_id"])

        print("index created!")

        # create class members
        self.anns = anns
        self.imgToAnns = imgToAnns
        self.catToImgs = catToImgs
        self.imgs = imgs
        self.cats = cats

    def loadRes(self, resFile):
        """
        Load result file and return a result api object.
        :param resFile (str) : file name of result file
        :return: res (obj) : result api object
        """
        # The result object shares ``images`` / ``imgs`` with ``self`` to
        # avoid copies; callers must treat those as read-only.
        res = COCOCustom()
        res.dataset["info"] = copy.deepcopy(self.dataset.get("info", {}))
        # MODIFICATION: no copy
        # res.dataset['images'] = [img for img in self.dataset['images']]
        res.dataset["images"] = self.dataset["images"]
        # END MODIFICATION

        print("Loading and preparing results...")
        tic = time.time()
        # Accept a JSON file path, a numpy array of annotations, or an
        # already-loaded list of annotation dicts.
        if type(resFile) == str:
            with open(resFile) as f:
                anns = json.load(f)
        elif type(resFile) == np.ndarray:
            anns = self.loadNumpyAnnotations(resFile)
        else:
            anns = resFile
        # NOTE(review): the message typo ("in not") is inherited verbatim
        # from upstream pycocotools; kept for parity.
        assert type(anns) == list, "results in not an array of objects"
        annsImgIds = [ann["image_id"] for ann in anns]
        # MODIFICATION: faster and cached subset check
        if not hasattr(self, "img_id_set"):
            self.img_id_set = set(self.getImgIds())
        assert set(annsImgIds).issubset(
            self.img_id_set
        ), "Results do not correspond to current coco set"
        # END MODIFICATION
        # The first annotation determines the result type (caption / bbox /
        # segmentation / keypoints), as in upstream pycocotools.
        if "caption" in anns[0]:
            imgIds = set([img["id"] for img in res.dataset["images"]]) & set(
                [ann["image_id"] for ann in anns]
            )
            res.dataset["images"] = [
                img for img in res.dataset["images"] if img["id"] in imgIds
            ]
            for id, ann in enumerate(anns):
                ann["id"] = id + 1
        elif "bbox" in anns[0] and not anns[0]["bbox"] == []:
            res.dataset["categories"] = copy.deepcopy(self.dataset["categories"])
            for id, ann in enumerate(anns):
                bb = ann["bbox"]
                x1, x2, y1, y2 = [bb[0], bb[0] + bb[2], bb[1], bb[1] + bb[3]]
                if not "segmentation" in ann:
                    # synthesize a rectangular polygon from the box
                    ann["segmentation"] = [[x1, y1, x1, y2, x2, y2, x2, y1]]
                ann["area"] = bb[2] * bb[3]
                ann["id"] = id + 1
                ann["iscrowd"] = 0
        elif "segmentation" in anns[0]:
            res.dataset["categories"] = copy.deepcopy(self.dataset["categories"])
            for id, ann in enumerate(anns):
                # now only support compressed RLE format as segmentation results
                ann["area"] = maskUtils.area(ann["segmentation"])
                if not "bbox" in ann:
                    ann["bbox"] = maskUtils.toBbox(ann["segmentation"])
                ann["id"] = id + 1
                ann["iscrowd"] = 0
        elif "keypoints" in anns[0]:
            res.dataset["categories"] = copy.deepcopy(self.dataset["categories"])
            for id, ann in enumerate(anns):
                s = ann["keypoints"]
                x = s[0::3]
                y = s[1::3]
                x0, x1, y0, y1 = np.min(x), np.max(x), np.min(y), np.max(y)
                ann["area"] = (x1 - x0) * (y1 - y0)
                ann["id"] = id + 1
                ann["bbox"] = [x0, y0, x1 - x0, y1 - y0]
        print("DONE (t={:0.2f}s)".format(time.time() - tic))

        res.dataset["annotations"] = anns
        # MODIFICATION: inherit images
        res.imgs = self.imgs
        # END MODIFICATION
        res.createIndex()
        return res
177
+
178
+
179
class CGF1Eval(COCOeval):
    """
    This evaluator is based upon COCO evaluation, but evaluates the model in a more realistic setting
    for downstream applications.
    See SAM3 paper for the details on the CGF1 metric.

    Do not use this evaluator directly. Prefer the CGF1Evaluator wrapper.

    Notes:
    - This evaluator does not support per-category evaluation (in the way defined by pyCocotools)
    - In open vocabulary settings, we have different noun-phrases for each image. What we call an "image_id" here is actually an (image, noun-phrase) pair. So in every "image_id" there is only one category, implied by the noun-phrase. Thus we can ignore the usual coco "category" field of the predictions
    """

    def __init__(
        self,
        coco_gt=None,
        coco_dt=None,
        iouType="segm",
        threshold=0.5,
    ):
        """
        Args:
            coco_gt (COCO): ground truth COCO API
            coco_dt (COCO): detections COCO API
            iouType (str): type of IoU to evaluate ("segm" or "bbox")
            threshold (float): score threshold applied to predictions in evaluateImg
        """
        super().__init__(coco_gt, coco_dt, iouType)
        # Predictions with score below this threshold are discarded (hard threshold,
        # unlike standard COCO AP which sweeps over scores).
        self.threshold = threshold

        # Categories are ignored (see class docstring); a single "all" area range
        # and an effectively unlimited number of detections are used.
        self.params.useCats = False
        self.params.areaRng = [[0**2, 1e5**2]]
        self.params.areaRngLbl = ["all"]
        self.params.maxDets = [1000000]

    def computeIoU(self, imgId, catId):
        # Same as the original COCOeval.computeIoU, but without sorting
        # (sorting by score would break the row alignment with keep_dt in evaluateImg).
        p = self.params
        if p.useCats:
            gt = self._gts[imgId, catId]
            dt = self._dts[imgId, catId]
        else:
            gt = [_ for cId in p.catIds for _ in self._gts[imgId, cId]]
            dt = [_ for cId in p.catIds for _ in self._dts[imgId, cId]]
        if len(gt) == 0 and len(dt) == 0:
            return []

        if p.iouType == "segm":
            g = [g["segmentation"] for g in gt]
            d = [d["segmentation"] for d in dt]
        elif p.iouType == "bbox":
            g = [g["bbox"] for g in gt]
            d = [d["bbox"] for d in dt]
        else:
            raise Exception("unknown iouType for iou computation")

        # compute iou between each dt and gt region
        iscrowd = [int(o["iscrowd"]) for o in gt]
        ious = maskUtils.iou(d, g, iscrowd)
        return ious

    def evaluateImg(self, imgId, catId, aRng, maxDet):
        """
        perform evaluation for single category and image
        (aRng and maxDet are unused; kept for COCOeval API compatibility)
        :return: dict (single image results)
        """
        p = self.params
        assert not p.useCats, "This evaluator does not support per-category evaluation."
        assert catId == -1
        # GTs flagged "ignore" and detections below the score threshold are dropped;
        # keep_gt/keep_dt record which rows/cols of the precomputed IoU matrix survive.
        all_gts = [_ for cId in p.catIds for _ in self._gts[imgId, cId]]
        keep_gt = np.array([not g["ignore"] for g in all_gts], dtype=bool)
        gt = [g for g in all_gts if not g["ignore"]]
        all_dts = [_ for cId in p.catIds for _ in self._dts[imgId, cId]]
        keep_dt = np.array([d["score"] >= self.threshold for d in all_dts], dtype=bool)
        dt = [d for d in all_dts if d["score"] >= self.threshold]
        if len(gt) == 0 and len(dt) == 0:
            # This is a "true negative" case, where there are no GTs and no predictions
            # The box-level metrics are ill-defined, so we don't add them to this dict
            return {
                "image_id": imgId,
                "IL_TP": 0,
                "IL_TN": 1,
                "IL_FP": 0,
                "IL_FN": 0,
                "num_dt": len(dt),
            }

        if len(gt) > 0 and len(dt) == 0:
            # This is a "false negative" case, where there are GTs but no predictions
            return {
                "image_id": imgId,
                "IL_TP": 0,
                "IL_TN": 0,
                "IL_FP": 0,
                "IL_FN": 1,
                "TPs": np.zeros((len(p.iouThrs),), dtype=np.int64),
                "FPs": np.zeros((len(p.iouThrs),), dtype=np.int64),
                "FNs": np.ones((len(p.iouThrs),), dtype=np.int64) * len(gt),
                "local_F1s": np.zeros((len(p.iouThrs),), dtype=np.int64),
                "local_positive_F1s": np.zeros((len(p.iouThrs),), dtype=np.int64),
                "num_dt": len(dt),
            }

        # Load pre-computed ious
        ious = self.ious[(imgId, catId)]

        # compute matching
        if len(ious) == 0:
            ious = np.zeros((len(dt), len(gt)))
        else:
            # Restrict the IoU matrix to kept detections/GTs; valid because
            # computeIoU above does not reorder rows/columns.
            ious = ious[keep_dt, :][:, keep_gt]
        assert ious.shape == (len(dt), len(gt))

        # One-to-one Hungarian matching maximizing total IoU.
        matched_dt, matched_gt = linear_sum_assignment(-ious)

        match_scores = ious[matched_dt, matched_gt]

        TPs, FPs, FNs = [], [], []
        IL_perfect = []
        for thresh in p.iouThrs:
            # A matched pair counts as TP only if its IoU clears the threshold.
            TP = (match_scores >= thresh).sum()
            FP = len(dt) - TP
            FN = len(gt) - TP
            assert (
                FP >= 0 and FN >= 0
            ), f"FP: {FP}, FN: {FN}, TP: {TP}, match_scores: {match_scores}, len(dt): {len(dt)}, len(gt): {len(gt)}, ious: {ious}"
            TPs.append(TP)
            FPs.append(FP)
            FNs.append(FN)

            if FP == FN and FP == 0:
                IL_perfect.append(1)
            else:
                IL_perfect.append(0)

        TPs = np.array(TPs, dtype=np.int64)
        FPs = np.array(FPs, dtype=np.int64)
        FNs = np.array(FNs, dtype=np.int64)
        # NOTE(review): IL_perfect is computed but never returned or used below.
        IL_perfect = np.array(IL_perfect, dtype=np.int64)

        # compute precision recall and F1 (1e-4 avoids division by zero)
        precision = TPs / (TPs + FPs + 1e-4)
        assert np.all(precision <= 1)
        recall = TPs / (TPs + FNs + 1e-4)
        assert np.all(recall <= 1)
        F1 = 2 * precision * recall / (precision + recall + 1e-4)

        result = {
            "image_id": imgId,
            "TPs": TPs,
            "FPs": FPs,
            "FNs": FNs,
            "local_F1s": F1,
            "IL_TP": (len(gt) > 0) and (len(dt) > 0),
            "IL_FP": (len(gt) == 0) and (len(dt) > 0),
            "IL_TN": (len(gt) == 0) and (len(dt) == 0),
            "IL_FN": (len(gt) > 0) and (len(dt) == 0),
            "num_dt": len(dt),
        }
        if len(gt) > 0 and len(dt) > 0:
            # Only "positive" images (both GTs and predictions present) contribute
            # to the positive-macro/micro F1 aggregates in accumulate().
            result["local_positive_F1s"] = F1
        return result

    def accumulate(self, p=None):
        """
        Accumulate per image evaluation results and store the result in self.eval
        :param p: input params for evaluation
        :return: None
        """
        if self.evalImgs is None or len(self.evalImgs) == 0:
            # NOTE(review): this only warns and then falls through; with
            # evalImgs=None the loop below would raise. Consider returning here.
            print("Please run evaluate() first")
        # allows input customized parameters
        if p is None:
            p = self.params

        setImgIds = set(p.imgIds)

        # TPs, FPs, FNs — accumulated per IoU threshold
        TPs = np.zeros((len(p.iouThrs),), dtype=np.int64)
        FPs = np.zeros((len(p.iouThrs),), dtype=np.int64)
        # "positive micro" FPs: FPs counted only on images that have both GTs and predictions
        pmFPs = np.zeros((len(p.iouThrs),), dtype=np.int64)
        FNs = np.zeros((len(p.iouThrs),), dtype=np.int64)
        local_F1s = np.zeros((len(p.iouThrs),), dtype=np.float64)

        # Image level metrics
        IL_TPs = 0
        IL_FPs = 0
        IL_TNs = 0
        IL_FNs = 0

        valid_img_count = 0
        valid_F1_count = 0
        evaledImgIds = set()
        for res in self.evalImgs:
            if res["image_id"] not in setImgIds:
                continue
            evaledImgIds.add(res["image_id"])
            IL_TPs += res["IL_TP"]
            IL_FPs += res["IL_FP"]
            IL_TNs += res["IL_TN"]
            IL_FNs += res["IL_FN"]

            # "TPs" is absent only for true-negative images (no GTs, no preds),
            # which do not contribute to box-level counts.
            if "TPs" not in res:
                continue

            TPs += res["TPs"]
            FPs += res["FPs"]
            FNs += res["FNs"]
            valid_img_count += 1

            if "local_positive_F1s" in res:
                local_F1s += res["local_positive_F1s"]
                pmFPs += res["FPs"]
                if res["num_dt"] > 0:
                    valid_F1_count += 1

        assert len(setImgIds - evaledImgIds) == 0, (
            f"{len(setImgIds - evaledImgIds)} images not evaluated. "
            f"Here are the IDs of the first 3: {list(setImgIds - evaledImgIds)[:3]}"
        )

        # compute precision recall and F1
        precision = TPs / (TPs + FPs + 1e-4)
        positive_micro_precision = TPs / (TPs + pmFPs + 1e-4)
        assert np.all(precision <= 1)
        recall = TPs / (TPs + FNs + 1e-4)
        assert np.all(recall <= 1)
        F1 = 2 * precision * recall / (precision + recall + 1e-4)
        positive_micro_F1 = (
            2
            * positive_micro_precision
            * recall
            / (positive_micro_precision + recall + 1e-4)
        )

        # Image-level (presence/absence) metrics, including Matthews correlation.
        IL_rec = IL_TPs / (IL_TPs + IL_FNs + 1e-6)
        IL_prec = IL_TPs / (IL_TPs + IL_FPs + 1e-6)
        IL_F1 = 2 * IL_prec * IL_rec / (IL_prec + IL_rec + 1e-6)
        IL_FPR = IL_FPs / (IL_FPs + IL_TNs + 1e-6)
        IL_MCC = float(IL_TPs * IL_TNs - IL_FPs * IL_FNs) / (
            (
                float(IL_TPs + IL_FPs)
                * float(IL_TPs + IL_FNs)
                * float(IL_TNs + IL_FPs)
                * float(IL_TNs + IL_FNs)
            )
            ** 0.5
            + 1e-6
        )

        self.eval = {
            "params": p,
            "TPs": TPs,
            "FPs": FPs,
            "positive_micro_FPs": pmFPs,
            "FNs": FNs,
            "precision": precision,
            "positive_micro_precision": positive_micro_precision,
            "recall": recall,
            "F1": F1,
            "positive_micro_F1": positive_micro_F1,
            # NOTE(review): if valid_F1_count is 0 this divides by zero
            # (numpy emits a warning and yields nan/inf) — TODO confirm intended.
            "positive_macro_F1": local_F1s / valid_F1_count,
            "IL_recall": IL_rec,
            "IL_precision": IL_prec,
            "IL_F1": IL_F1,
            "IL_FPR": IL_FPR,
            "IL_MCC": IL_MCC,
        }
        # cgF1 = positive micro F1 gated by image-level MCC (see SAM3 paper).
        self.eval["cgF1"] = self.eval["positive_micro_F1"] * self.eval["IL_MCC"]

    def summarize(self):
        """
        Compute and display summary metrics for evaluation results.
        Stores the values (in CGF1_METRICS order) in self.stats.
        """
        if not self.eval:
            raise Exception("Please run accumulate() first")

        def _summarize(iouThr=None, metric=""):
            # Print and return a metric averaged over IoU thresholds
            # (or at a single threshold when iouThr is given).
            p = self.params
            iStr = " {:<18} @[ IoU={:<9}] = {:0.3f}"
            titleStr = "Average " + metric
            iouStr = (
                "{:0.2f}:{:0.2f}".format(p.iouThrs[0], p.iouThrs[-1])
                if iouThr is None
                else "{:0.2f}".format(iouThr)
            )

            s = self.eval[metric]
            # IoU
            if iouThr is not None:
                t = np.where(iouThr == p.iouThrs)[0]
                s = s[t]

            if len(s[s > -1]) == 0:
                mean_s = -1
            else:
                mean_s = np.mean(s[s > -1])
            print(iStr.format(titleStr, iouStr, mean_s))
            return mean_s

        def _summarize_single(metric=""):
            # Print and return a scalar (image-level) metric.
            titleStr = "Average " + metric
            iStr = " {:<35} = {:0.3f}"
            s = self.eval[metric]
            print(iStr.format(titleStr, s))
            return s

        def _summarizeDets():
            stats = []

            for metric in CGF1_METRICS:
                if metric.image_level:
                    stats.append(_summarize_single(metric=metric.name))
                else:
                    stats.append(
                        _summarize(iouThr=metric.iou_threshold, metric=metric.name)
                    )
            return np.asarray(stats)

        summarize = _summarizeDets
        self.stats = summarize()
500
+
501
+
502
def _evaluate(self):
    """
    Run per image evaluation on given images and store results (a list of dict) in self.evalImgs

    Module-level re-implementation of COCOeval.evaluate applied to a CGF1Eval
    instance (called as `_evaluate(coco_eval)` by CGF1Evaluator). Unlike the
    pycocotools version it also reshapes evalImgs and returns it.

    Returns:
        tuple: (imgIds, evalImgs) where evalImgs is a numpy object array of
        shape [numCats(=1), numAreaRngs, numImgs].
    """
    p = self.params
    # add backward compatibility if useSegm is specified in params
    # NOTE(review): this comment is inherited from pycocotools; no useSegm
    # handling actually remains here.
    p.imgIds = list(np.unique(p.imgIds))
    p.useCats = False
    p.maxDets = sorted(p.maxDets)
    self.params = p

    # _prepare must run after params are frozen: it fills self._gts/self._dts.
    self._prepare()
    # loop through images, area range, max detection number
    # Single dummy category id, consistent with the catId == -1 assert in evaluateImg.
    catIds = [-1]

    if p.iouType == "segm" or p.iouType == "bbox":
        computeIoU = self.computeIoU
    else:
        raise RuntimeError(f"Unsupported iou {p.iouType}")
    # Precompute IoUs once per (image, category); evaluateImg reads self.ious.
    self.ious = {
        (imgId, catId): computeIoU(imgId, catId)
        for imgId in p.imgIds
        for catId in catIds
    }

    maxDet = p.maxDets[-1]
    evalImgs = [
        self.evaluateImg(imgId, catId, areaRng, maxDet)
        for catId in catIds
        for areaRng in p.areaRng
        for imgId in p.imgIds
    ]
    # this is NOT in the pycocotools code, but could be done outside
    evalImgs = np.asarray(evalImgs).reshape(len(catIds), len(p.areaRng), len(p.imgIds))
    return p.imgIds, evalImgs
537
+
538
+
539
class CGF1Evaluator:
    """
    Wrapper class for cgF1 evaluation.
    This supports the oracle setting (when several ground-truths are available per image):
    for each image, the prediction is scored against every ground truth and the
    best-scoring ground truth is kept (see _select_best_scoring).
    """

    def __init__(
        self,
        gt_path: Union[str, List[str]],
        iou_type="segm",
        verbose=False,
    ):
        """
        Args:
            gt_path (str or list of str): path(s) to ground truth COCO json file(s);
                passing several files enables the oracle setting
            iou_type (str): type of IoU to evaluate ("segm" or "bbox")
            verbose (bool): if True, print progress and summary information
        """
        self.gt_paths = gt_path if isinstance(gt_path, list) else [gt_path]
        self.iou_type = iou_type

        self.coco_gts = [COCOCustom(gt) for gt in self.gt_paths]

        self.verbose = verbose

        # One CGF1Eval per ground-truth file; predictions are attached later in evaluate().
        self.coco_evals = []
        for i, coco_gt in enumerate(self.coco_gts):
            self.coco_evals.append(
                CGF1Eval(
                    coco_gt=coco_gt,
                    iouType=iou_type,
                )
            )
            # NOTE(review): this sets an attribute on the evaluator object, not
            # params.useCats; CGF1Eval.__init__ already sets params.useCats=False,
            # so this line looks redundant — TODO confirm.
            self.coco_evals[i].useCats = False

        exclude_img_ids = set()
        # exclude_img_ids are the ids that are not exhaustively annotated in any of the other gts
        for coco_gt in self.coco_gts[1:]:
            exclude_img_ids = exclude_img_ids.union(
                {
                    img["id"]
                    for img in coco_gt.dataset["images"]
                    if not img["is_instance_exhaustive"]
                }
            )
        # we only eval on instance exhaustive queries
        self.eval_img_ids = [
            img["id"]
            for img in self.coco_gts[0].dataset["images"]
            if (img["is_instance_exhaustive"] and img["id"] not in exclude_img_ids)
        ]

    def evaluate(self, pred_file: str):
        """
        Evaluate the detections using cgF1 metric.

        Args:
            pred_file: path to the predictions COCO json file

        Returns:
            dict: metric name -> value, keys formatted as
            "cgF1_eval_{iou_type}_{metric}[@iou_threshold]".
        """
        assert len(self.coco_gts) > 0, "No ground truth provided for evaluation."
        assert len(self.coco_gts) == len(
            self.coco_evals
        ), "Mismatch in number of ground truths and evaluators."

        if self.verbose:
            print(f"Loading predictions from {pred_file}")

        with open(pred_file, "r") as f:
            preds = json.load(f)

        if self.verbose:
            print(f"Loaded {len(preds)} predictions")

        # Group predictions by (image, noun-phrase) id.
        img2preds = defaultdict(list)
        for pred in preds:
            img2preds[pred["image_id"]].append(pred)

        all_eval_imgs = []
        for img_id in tqdm(self.eval_img_ids, disable=not self.verbose):
            results = img2preds[img_id]
            all_scorings = []
            # Score the predictions of this image against every available ground truth.
            for cur_coco_gt, coco_eval in zip(self.coco_gts, self.coco_evals):
                # suppress pycocotools prints
                with open(os.devnull, "w") as devnull:
                    with contextlib.redirect_stdout(devnull):
                        coco_dt = (
                            cur_coco_gt.loadRes(results) if results else COCOCustom()
                        )

                coco_eval.cocoDt = coco_dt
                coco_eval.params.imgIds = [img_id]
                coco_eval.params.useCats = False
                img_ids, eval_imgs = _evaluate(coco_eval)
                all_scorings.append(eval_imgs)
            # Oracle: keep the per-image result from the best-matching ground truth.
            selected = self._select_best_scoring(all_scorings)
            all_eval_imgs.append(selected)

        # After this point, we have selected the best scoring per image among several ground truths
        # we can now accumulate and summarize, using only the first coco_eval

        self.coco_evals[0].evalImgs = list(
            np.concatenate(all_eval_imgs, axis=2).flatten()
        )
        self.coco_evals[0].params.imgIds = self.eval_img_ids
        self.coco_evals[0]._paramsEval = copy.deepcopy(self.coco_evals[0].params)

        if self.verbose:
            print(f"Accumulating results")
        # accumulate/summarize must run unconditionally: self.stats (read below)
        # is only populated by summarize().
        self.coco_evals[0].accumulate()
        print("cgF1 metric, IoU type={}".format(self.iou_type))
        self.coco_evals[0].summarize()
        print()

        out = {}
        for i, value in enumerate(self.coco_evals[0].stats):
            name = CGF1_METRICS[i].name
            if CGF1_METRICS[i].iou_threshold is not None:
                name = f"{name}@{CGF1_METRICS[i].iou_threshold}"
            out[f"cgF1_eval_{self.iou_type}_{name}"] = float(value)

        return out

    @staticmethod
    def _select_best_scoring(scorings):
        # This function is used for "oracle" type evaluation.
        # It accepts the evaluation results with respect to several ground truths, and picks the best
        if len(scorings) == 1:
            return scorings[0]

        assert (
            scorings[0].ndim == 3
        ), f"Expecting results in [numCats, numAreas, numImgs] format, got {scorings[0].shape}"
        assert (
            scorings[0].shape[0] == 1
        ), f"Expecting a single category, got {scorings[0].shape[0]}"

        for scoring in scorings:
            assert (
                scoring.shape == scorings[0].shape
            ), f"Shape mismatch: {scoring.shape}, {scorings[0].shape}"

        selected_imgs = []
        for img_id in range(scorings[0].shape[-1]):
            best = scorings[0][:, :, img_id]

            for scoring in scorings[1:]:
                current = scoring[:, :, img_id]
                if "local_F1s" in best[0, 0] and "local_F1s" in current[0, 0]:
                    # we were able to compute a F1 score for this particular image in both evaluations
                    # best["local_F1s"] contains the results at various IoU thresholds. We simply take the average for comparision
                    best_score = best[0, 0]["local_F1s"].mean()
                    current_score = current[0, 0]["local_F1s"].mean()
                    if current_score > best_score:
                        best = current

                else:
                    # If we're here, it means that in that in some evaluation we were not able to get a valid local F1
                    # This happens when both the predictions and targets are empty. In that case, we can assume it's a perfect prediction
                    if "local_F1s" not in current[0, 0]:
                        best = current
            selected_imgs.append(best)
        result = np.stack(selected_imgs, axis=-1)
        assert result.shape == scorings[0].shape
        return result
source_code/sam3/sam3/eval/conversion_util.py ADDED
@@ -0,0 +1,211 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
2
+ import json
3
+ import os
4
+ from collections import defaultdict
5
+
6
+ from tqdm import tqdm
7
+
8
+
9
def convert_ytbvis_to_cocovid_gt(ann_json, save_path=None):
    """Convert YouTube VIS dataset to COCO-style video instance segmentation format.

    Args:
        ann_json (str): Path to YouTube VIS annotation JSON file
        save_path (str, optional): path to save converted COCO-style JSON.
            If None, the converted dict is returned without writing to disk.

    Returns:
        dict: the converted COCO-style dataset.
    """
    # Initialize COCO structure
    VIS = {
        "info": {},
        "images": [],
        "videos": [],
        "tracks": [],
        "annotations": [],
        "categories": [],
        "licenses": [],
    }

    # Load original annotations (context manager ensures the file handle is closed;
    # the original `json.load(open(...))` leaked it).
    with open(ann_json) as f:
        official_anns = json.load(f)
    VIS["categories"] = official_anns["categories"]  # Direct copy categories

    # Initialize counters (image and annotation ids are 1-based)
    records = dict(img_id=1, ann_id=1)

    # Create video-to-annotations mapping
    vid_to_anns = defaultdict(list)
    for ann in official_anns["annotations"]:
        vid_to_anns[ann["video_id"]].append(ann)

    # Create tracks directly: one track per YouTube-VIS annotation (object instance)
    VIS["tracks"] = [
        {
            "id": ann["id"],
            "category_id": ann["category_id"],
            "video_id": ann["video_id"],
        }
        for ann in official_anns["annotations"]
    ]

    # Process videos
    for video_info in tqdm(official_anns["videos"]):
        # Create video entry
        video = {
            "id": video_info["id"],
            "name": os.path.dirname(video_info["file_names"][0]),
            "width": video_info["width"],
            "height": video_info["height"],
            "length": video_info["length"],
            "neg_category_ids": [],
            "not_exhaustive_category_ids": [],
        }
        VIS["videos"].append(video)

        # Process frames
        num_frames = len(video_info["file_names"])
        for frame_idx in range(num_frames):
            # Create image entry (one per frame)
            image = {
                "id": records["img_id"],
                "video_id": video_info["id"],
                "file_name": video_info["file_names"][frame_idx],
                "width": video_info["width"],
                "height": video_info["height"],
                "frame_index": frame_idx,
                "frame_id": frame_idx,
            }
            VIS["images"].append(image)

            # Process annotations for this frame
            if video_info["id"] in vid_to_anns:
                for ann in vid_to_anns[video_info["id"]]:
                    bbox = ann["bboxes"][frame_idx]
                    # A None bbox means the object is absent in this frame.
                    if bbox is None:
                        continue

                    # Create annotation entry
                    annotation = {
                        "id": records["ann_id"],
                        "video_id": video_info["id"],
                        "image_id": records["img_id"],
                        "track_id": ann["id"],
                        "category_id": ann["category_id"],
                        "bbox": bbox,
                        "area": ann["areas"][frame_idx],
                        "segmentation": ann["segmentations"][frame_idx],
                        "iscrowd": ann["iscrowd"],
                    }
                    VIS["annotations"].append(annotation)
                    records["ann_id"] += 1

            records["img_id"] += 1

    # Print summary
    print(f"Converted {len(VIS['videos'])} videos")
    print(f"Converted {len(VIS['images'])} images")
    print(f"Created {len(VIS['tracks'])} tracks")
    print(f"Created {len(VIS['annotations'])} annotations")

    if save_path is None:
        return VIS

    # Save output. Guard the makedirs call: for a bare filename,
    # os.path.dirname returns "" and os.makedirs("") raises FileNotFoundError.
    save_dir = os.path.dirname(save_path)
    if save_dir:
        os.makedirs(save_dir, exist_ok=True)
    with open(save_path, "w") as f:
        json.dump(VIS, f)

    return VIS
117
+
118
+
119
def convert_ytbvis_to_cocovid_pred(
    youtubevis_pred_path: str, converted_dataset_path: str, output_path: str
) -> None:
    """
    Convert YouTubeVIS predictions to COCO format with video_id preservation

    Args:
        youtubevis_pred_path: Path to YouTubeVIS prediction JSON
        converted_dataset_path: Path to converted COCO dataset JSON
        output_path: Path to save COCO format predictions
    """
    # Load YouTubeVIS predictions
    with open(youtubevis_pred_path) as fp:
        ytv_predictions = json.load(fp)

    # Load the converted COCO dataset, used only to map frames back to image ids
    with open(converted_dataset_path) as fp:
        coco_dataset = json.load(fp)

    # Build the (video_id, frame_index) -> image_id lookup table
    image_id_map = {}
    for img in coco_dataset["images"]:
        image_id_map[(img["video_id"], img["frame_index"])] = img["id"]

    coco_annotations = []

    # Every prediction corresponds to one track; tracks are numbered from 1
    # in input order, whether or not any of their frames end up emitted.
    for track_id, pred in enumerate(tqdm(ytv_predictions), start=1):
        video_id = pred["video_id"]
        category_id = pred["category_id"]
        bboxes = pred["bboxes"]
        segmentations = pred.get("segmentations", [])  # optional per-frame masks
        areas = pred.get("areas", [])  # optional per-frame areas
        score = pred["score"]

        # Pad the optional per-frame fields so they zip with the boxes
        if len(segmentations) == 0:
            segmentations = [None] * len(bboxes)
        if len(areas) == 0:
            areas = [None] * len(bboxes)

        frame_idx = -1
        for bbox, segmentation, area_from_pred in zip(bboxes, segmentations, areas):
            frame_idx += 1

            # Frames with a missing object are encoded as None or an all-zero box
            if bbox is None or all(coord == 0 for coord in bbox):
                continue

            image_id = image_id_map.get((video_id, frame_idx))
            if image_id is None:
                raise RuntimeError(
                    f"prediction {video_id=}, {frame_idx=} does not match any images in the converted COCO format"
                )

            x, y, w, h = bbox

            # Prefer the predicted area when present and positive, else use box area
            if area_from_pred is not None and area_from_pred > 0:
                area = area_from_pred
            else:
                area = w * h

            # Assemble the COCO-style record (video_id is kept on purpose)
            coco_annotation = {
                "image_id": int(image_id),
                "video_id": video_id,
                "track_id": track_id,
                "category_id": category_id,
                "bbox": [float(x), float(y), float(w), float(h)],
                "area": float(area),
                "iscrowd": 0,
                "score": float(score),
            }

            if segmentation is not None:
                coco_annotation["segmentation"] = segmentation

            coco_annotations.append(coco_annotation)

    # Save output
    with open(output_path, "w") as fp:
        json.dump(coco_annotations, fp)

    print(f"Converted {len(coco_annotations)} predictions to COCO format with video_id")
source_code/sam3/sam3/eval/hota_eval_toolkit/run_ytvis_eval.py ADDED
@@ -0,0 +1,114 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # flake8: noqa
2
+
3
+ """run_youtube_vis.py
4
+ Run example:
5
+ run_youtube_vis.py --USE_PARALLEL False --METRICS HOTA --TRACKERS_TO_EVAL STEm_Seg
6
+ Command Line Arguments: Defaults, # Comments
7
+ Eval arguments:
8
+ 'USE_PARALLEL': False,
9
+ 'NUM_PARALLEL_CORES': 8,
10
+ 'BREAK_ON_ERROR': True, # Raises exception and exits with error
11
+ 'RETURN_ON_ERROR': False, # if not BREAK_ON_ERROR, then returns from function on error
12
+ 'LOG_ON_ERROR': os.path.join(code_path, 'error_log.txt'), # if not None, save any errors into a log file.
13
+ 'PRINT_RESULTS': True,
14
+ 'PRINT_ONLY_COMBINED': False,
15
+ 'PRINT_CONFIG': True,
16
+ 'TIME_PROGRESS': True,
17
+ 'DISPLAY_LESS_PROGRESS': True,
18
+ 'OUTPUT_SUMMARY': True,
19
+ 'OUTPUT_EMPTY_CLASSES': True, # If False, summary files are not output for classes with no detections
20
+ 'OUTPUT_DETAILED': True,
21
+ 'PLOT_CURVES': True,
22
+ Dataset arguments:
23
+ 'GT_FOLDER': os.path.join(code_path, 'data/gt/youtube_vis/youtube_vis_training'), # Location of GT data
24
+ 'TRACKERS_FOLDER': os.path.join(code_path, 'data/trackers/youtube_vis/youtube_vis_training'),
25
+ # Trackers location
26
+ 'OUTPUT_FOLDER': None, # Where to save eval results (if None, same as TRACKERS_FOLDER)
27
+ 'TRACKERS_TO_EVAL': None, # Filenames of trackers to eval (if None, all in folder)
28
+ 'CLASSES_TO_EVAL': None, # Classes to eval (if None, all classes)
29
+ 'SPLIT_TO_EVAL': 'training', # Valid: 'training', 'val'
30
+ 'PRINT_CONFIG': True, # Whether to print current config
31
+ 'OUTPUT_SUB_FOLDER': '', # Output files are saved in OUTPUT_FOLDER/tracker_name/OUTPUT_SUB_FOLDER
32
+ 'TRACKER_SUB_FOLDER': 'data', # Tracker files are in TRACKER_FOLDER/tracker_name/TRACKER_SUB_FOLDER
33
+ 'TRACKER_DISPLAY_NAMES': None, # Names of trackers to display, if None: TRACKERS_TO_EVAL
34
+ Metric arguments:
35
+ 'METRICS': ['TrackMAP', 'HOTA', 'CLEAR', 'Identity']
36
+ """
37
+
38
+ import argparse
39
+ import os
40
+ import sys
41
+ from multiprocessing import freeze_support
42
+
43
+ from . import trackeval
44
+
45
+
46
def run_ytvis_eval(args=None, gt_json=None, dt_json=None):
    """Run HOTA evaluation on YouTube-VIS style data.

    Args:
        args: optional list of CLI-style arguments (e.g. ``sys.argv[1:]``);
            if None, argparse reads from ``sys.argv``.
        gt_json: optional ground-truth JSON object, passed to the dataset so
            it does not read GT from files.
        dt_json: optional tracker-result JSON object, passed to the dataset so
            it does not read tracker output from files.

    Returns:
        tuple: (output_res, output_msg) from ``trackeval.Evaluator.evaluate``.
    """
    # Command line interface: merge default eval/dataset/metric configs,
    # then let CLI flags override individual keys.
    default_eval_config = trackeval.Evaluator.get_default_eval_config()
    # print only combined since TrackMAP is undefined for per sequence breakdowns
    default_eval_config["PRINT_ONLY_COMBINED"] = True
    default_dataset_config = trackeval.datasets.YouTubeVIS.get_default_dataset_config()
    default_metrics_config = {"METRICS": ["HOTA"]}
    config = {
        **default_eval_config,
        **default_dataset_config,
        **default_metrics_config,
    }  # Merge default configs
    parser = argparse.ArgumentParser()
    for setting in config.keys():
        # List-valued (or unset) settings accept multiple values on the CLI.
        if isinstance(config[setting], list) or config[setting] is None:
            parser.add_argument("--" + setting, nargs="+")
        else:
            parser.add_argument("--" + setting)
    args = parser.parse_args(args).__dict__
    for setting in args.keys():
        if args[setting] is not None:
            # Coerce the CLI string back to the type of the default value.
            # (The former `type(...) == type(None)` branch was unreachable here,
            # since this code only runs when args[setting] is not None.)
            if isinstance(config[setting], bool):
                if args[setting] == "True":
                    x = True
                elif args[setting] == "False":
                    x = False
                else:
                    raise Exception(
                        "Command line parameter " + setting + " must be True or False"
                    )
            elif isinstance(config[setting], int):
                x = int(args[setting])
            else:
                x = args[setting]
            config[setting] = x
    eval_config = {k: v for k, v in config.items() if k in default_eval_config.keys()}
    dataset_config = {
        k: v for k, v in config.items() if k in default_dataset_config.keys()
    }
    metrics_config = {
        k: v for k, v in config.items() if k in default_metrics_config.keys()
    }

    # Run code
    evaluator = trackeval.Evaluator(eval_config)
    # allow directly specifying the GT JSON data and Tracker (result)
    # JSON data as Python objects, without reading from files.
    dataset_config["GT_JSON_OBJECT"] = gt_json
    dataset_config["TRACKER_JSON_OBJECT"] = dt_json
    dataset_list = [trackeval.datasets.YouTubeVIS(dataset_config)]
    metrics_list = []
    # Only HOTA is wired up here (upstream TrackEval also supports
    # TrackMAP, CLEAR and Identity).
    for metric in [trackeval.metrics.HOTA]:
        if metric.get_name() in metrics_config["METRICS"]:
            metrics_list.append(metric())
    if len(metrics_list) == 0:
        raise Exception("No metrics selected for evaluation")
    output_res, output_msg = evaluator.evaluate(dataset_list, metrics_list)
    return output_res, output_msg
108
+
109
+
110
if __name__ == "__main__":
    # `sys` is already imported at module scope; the previous local
    # `import sys` was redundant and shadowed it.
    # freeze_support() is a no-op except in frozen Windows executables.
    freeze_support()
    run_ytvis_eval(sys.argv[1:])
source_code/sam3/sam3/eval/hota_eval_toolkit/trackeval/datasets/_base_dataset.py ADDED
@@ -0,0 +1,379 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # flake8: noqa
2
+
3
+ import csv
4
+ import io
5
+ import os
6
+ import traceback
7
+ import zipfile
8
+ from abc import ABC, abstractmethod
9
+ from copy import deepcopy
10
+
11
+ import numpy as np
12
+
13
+ from .. import _timing
14
+ from ..utils import TrackEvalException
15
+
16
+
17
+ class _BaseDataset(ABC):
18
    @abstractmethod
    def __init__(self):
        # Subclasses are expected to populate these fields in their own __init__.
        self.tracker_list = None  # trackers to evaluate (returned by get_eval_info)
        self.seq_list = None  # sequences in the split (returned by get_eval_info)
        self.class_list = None  # classes to evaluate (returned by get_eval_info)
        self.output_fol = None  # root output folder (used by get_output_fol)
        self.output_sub_fol = None  # per-tracker output subfolder (used by get_output_fol)
        # Flags consumed by the evaluator; semantics defined by subclasses/evaluator —
        # presumably whether per-class results are combined, and whether
        # super-categories exist (TODO confirm against Evaluator).
        self.should_classes_combine = True
        self.use_super_categories = False
27
+
28
+ # Functions to implement:
29
+
30
    @staticmethod
    @abstractmethod
    def get_default_dataset_config():
        """Return the default configuration dict for this dataset (subclass-specific keys)."""
        ...
33
+
34
    @abstractmethod
    def _load_raw_file(self, tracker, seq, is_gt):
        """Load raw data for one sequence: ground truth if ``is_gt`` else tracker output."""
        ...
36
+
37
    @_timing.time
    @abstractmethod
    def get_preprocessed_seq_data(self, raw_data, cls):
        """Preprocess ``raw_data`` (from get_raw_seq_data) and extract the data for class ``cls``."""
        ...
40
+
41
    @abstractmethod
    def _calculate_similarities(self, gt_dets_t, tracker_dets_t):
        """Compute the GT-vs-tracker similarity matrix for one timestep (format is dataset-specific)."""
        ...
43
+
44
+ # Helper functions for all datasets:
45
+
46
    @classmethod
    def get_class_name(cls):
        """Return the (sub)class name, used as the dataset's identifier."""
        return cls.__name__
49
+
50
    def get_name(self):
        """Return the dataset name (the class name by default)."""
        return self.get_class_name()
52
+
53
+ def get_output_fol(self, tracker):
54
+ return os.path.join(self.output_fol, tracker, self.output_sub_fol)
55
+
56
    def get_display_name(self, tracker):
        """Can be overwritten if the tracker's name (in files) is different to how it should be displayed.
        By default this method just returns the tracker's name as is.
        """
        return tracker
61
+
62
    def get_eval_info(self):
        """Return info about the dataset needed for the Evaluator"""
        # (tracker_list, seq_list, class_list) as set up by the subclass __init__.
        return self.tracker_list, self.seq_list, self.class_list
65
+
66
@_timing.time
def get_raw_seq_data(self, tracker, seq):
    """Loads raw data (tracker and ground-truth) for a single tracker on a single sequence.
    Raw data includes all of the information needed for both preprocessing and evaluation, for all classes.
    A later function (get_processed_seq_data) will perform such preprocessing and extract relevant information for
    the evaluation of each class.

    This returns a dict which contains the fields:
    [num_timesteps]: integer
    [gt_ids, tracker_ids, gt_classes, tracker_classes, tracker_confidences]:
        list (for each timestep) of 1D NDArrays (for each det).
    [gt_dets, tracker_dets, gt_crowd_ignore_regions]: list (for each timestep) of lists of detections.
    [similarity_scores]: list (for each timestep) of 2D NDArrays.
    [gt_extras]: dict (for each extra) of lists (for each timestep) of 1D NDArrays (for each det).

    gt_extras contains dataset specific information used for preprocessing such as occlusion and truncation levels.

    Note that similarities are extracted as part of the dataset and not the metric, because almost all metrics are
    independent of the exact method of calculating the similarity. However datasets are not (e.g. segmentation
    masks vs 2D boxes vs 3D boxes).
    We calculate the similarity before preprocessing because often both preprocessing and evaluation require it and
    we don't wish to calculate this twice.
    We calculate similarity between all gt and tracker classes (not just each class individually) to allow for
    calculation of metrics such as class confusion matrices. Typically the impact of this on performance is low.
    """
    # Load the ground-truth and tracker halves, then merge them into one dict.
    # Ground-truth keys win on any (unexpected) collision.
    raw_gt_data = self._load_raw_file(tracker, seq, is_gt=True)
    raw_tracker_data = self._load_raw_file(tracker, seq, is_gt=False)
    raw_data = {**raw_tracker_data, **raw_gt_data}

    # Pre-compute, per timestep, the similarity between every gt det and
    # every tracker det (dataset subclasses define _calculate_similarities).
    raw_data["similarity_scores"] = [
        self._calculate_similarities(gt_dets_t, tracker_dets_t)
        for gt_dets_t, tracker_dets_t in zip(
            raw_data["gt_dets"], raw_data["tracker_dets"]
        )
    ]
    return raw_data
105
+
106
@staticmethod
def _load_simple_text_file(
    file,
    time_col=0,
    id_col=None,
    remove_negative_ids=False,
    valid_filter=None,
    crowd_ignore_filter=None,
    convert_filter=None,
    is_zipped=False,
    zip_file=None,
    force_delimiters=None,
):
    """Function that loads data which is in a commonly used text file format.
    Assumes each det is given by one row of a text file.
    There is no limit to the number or meaning of each column,
    however one column needs to give the timestep of each det (time_col) which is default col 0.

    The file dialect (deliminator, num cols, etc) is determined automatically.
    This function automatically separates dets by timestep,
    and is much faster than alternatives such as np.loadtext or pandas.

    If remove_negative_ids is True and id_col is not None, dets with negative values in id_col are excluded.
    These are not excluded from ignore data.

    valid_filter can be used to only include certain classes.
    It is a dict with ints as keys, and lists as values,
    such that a row is included if "row[key].lower() is in value" for all key/value pairs in the dict.
    If None, all classes are included.

    crowd_ignore_filter can be used to read crowd_ignore regions separately. It has the same format as valid filter.

    convert_filter can be used to convert value read to another format.
    This is used most commonly to convert classes given as string to a class id.
    This is a dict such that the key is the column to convert, and the value is another dict giving the mapping.

    Optionally, input files could be a zip of multiple text files for storage efficiency.

    Returns read_data and ignore_data.
    Each is a dict (with keys as timesteps as strings) of lists (over dets) of lists (over column values).
    Note that all data is returned as strings, and must be converted to float/int later if needed.
    Note that timesteps will not be present in the returned dict keys if there are no dets for them
    """
    if remove_negative_ids and id_col is None:
        raise TrackEvalException(
            "remove_negative_ids is True, but id_col is not given."
        )
    if crowd_ignore_filter is None:
        crowd_ignore_filter = {}
    if convert_filter is None:
        convert_filter = {}
    try:
        if is_zipped:  # Either open file directly or within a zip.
            if zip_file is None:
                raise TrackEvalException(
                    "is_zipped set to True, but no zip_file is given."
                )
            archive = zipfile.ZipFile(os.path.join(zip_file), "r")
            fp = io.TextIOWrapper(archive.open(file, "r"))
        else:
            fp = open(file)
        read_data = {}
        crowd_ignore_data = {}
        try:
            # Check if file is empty; an empty file yields two empty dicts.
            fp.seek(0, os.SEEK_END)
            if fp.tell():
                fp.seek(0)
                dialect = csv.Sniffer().sniff(
                    fp.readline(), delimiters=force_delimiters
                )  # Auto determine structure.
                dialect.skipinitialspace = (
                    True  # Deal with extra spaces between columns
                )
                fp.seek(0)
                reader = csv.reader(fp, dialect)
                for row in reader:
                    try:
                        # Deal with extra trailing spaces at the end of rows
                        if row[-1] == "":
                            row = row[:-1]
                        timestep = str(int(float(row[time_col])))
                        # Read ignore regions separately.
                        is_ignored = False
                        for ignore_key, ignore_value in crowd_ignore_filter.items():
                            if row[ignore_key].lower() in ignore_value:
                                # Convert values in one column (e.g. string to id)
                                for (
                                    convert_key,
                                    convert_value,
                                ) in convert_filter.items():
                                    row[convert_key] = convert_value[
                                        row[convert_key].lower()
                                    ]
                                # Save data separated by timestep.
                                crowd_ignore_data.setdefault(timestep, []).append(
                                    row
                                )
                                is_ignored = True
                        # If det is an ignore region, it cannot be a normal det.
                        if is_ignored:
                            continue
                        # Exclude dets that fail any valid_filter condition.
                        # (Bug fix: the original `continue` sat inside the inner
                        # filter loop, so it only advanced to the next filter key
                        # and invalid rows were never actually skipped.)
                        if valid_filter is not None and any(
                            row[key].lower() not in value
                            for key, value in valid_filter.items()
                        ):
                            continue
                        if remove_negative_ids and int(float(row[id_col])) < 0:
                            continue
                        # Convert values in one column (e.g. string to id)
                        for convert_key, convert_value in convert_filter.items():
                            row[convert_key] = convert_value[
                                row[convert_key].lower()
                            ]
                        # Save data separated by timestep.
                        read_data.setdefault(timestep, []).append(row)
                    except Exception:
                        exc_str_init = (
                            "In file %s the following line cannot be read correctly: \n"
                            % os.path.basename(file)
                        )
                        exc_str = " ".join([exc_str_init] + row)
                        raise TrackEvalException(exc_str)
        finally:
            # Bug fix: always release the handle, even when parsing raises
            # (the original only closed it on the success path).
            fp.close()
    except Exception:
        print("Error loading file: %s, printing traceback." % file)
        traceback.print_exc()
        raise TrackEvalException(
            "File %s cannot be read because it is either not present or invalidly formatted"
            % os.path.basename(file)
        )
    return read_data, crowd_ignore_data
242
+
243
@staticmethod
def _calculate_mask_ious(masks1, masks2, is_encoded=False, do_ioa=False):
    """Calculates the IOU (intersection over union) between two arrays of segmentation masks.
    If is_encoded a run length encoding with pycocotools is assumed as input format, otherwise an input of numpy
    arrays of the shape (num_masks, height, width) is assumed and the encoding is performed.
    If do_ioa (intersection over area) , then calculates the intersection over the area of masks1 - this is commonly
    used to determine if detections are within crowd ignore region.
    :param masks1: first set of masks (numpy array of shape (num_masks, height, width) if not encoded,
                   else pycocotools rle encoded format)
    :param masks2: second set of masks (numpy array of shape (num_masks, height, width) if not encoded,
                   else pycocotools rle encoded format)
    :param is_encoded: whether the input is in pycocotools rle encoded format
    :param do_ioa: whether to perform IoA computation
    :return: the IoU/IoA scores as a (len(masks1), len(masks2)) array
    """

    # Only loaded when run to reduce minimum requirements
    from pycocotools import mask as mask_utils

    # use pycocotools for run length encoding of masks
    # (encode expects Fortran-ordered (H, W, N) arrays, hence the transpose)
    if not is_encoded:
        masks1 = mask_utils.encode(
            np.array(np.transpose(masks1, (1, 2, 0)), order="F")
        )
        masks2 = mask_utils.encode(
            np.array(np.transpose(masks2, (1, 2, 0)), order="F")
        )

    # use pycocotools for iou computation of rle encoded masks
    # The per-element `iscrowd` flag is repurposed here: setting it makes
    # pycocotools compute intersection-over-area instead of IoU.
    ious = mask_utils.iou(masks1, masks2, [do_ioa] * len(masks2))
    if len(masks1) == 0 or len(masks2) == 0:
        # When either side is empty pycocotools returns an empty result;
        # reshape so callers always get a 2D (N1, N2) array.
        ious = np.asarray(ious).reshape(len(masks1), len(masks2))
    # Sanity-check that all scores are valid ratios in [0, 1] (up to float eps).
    assert (ious >= 0 - np.finfo("float").eps).all()
    assert (ious <= 1 + np.finfo("float").eps).all()

    return ious
279
+
280
@staticmethod
def _calculate_box_ious(bboxes1, bboxes2, box_format="xywh", do_ioa=False):
    """Calculates the IOU (intersection over union) between two arrays of boxes.
    Allows variable box formats ('xywh' and 'x0y0x1y1').
    If do_ioa (intersection over area) , then calculates the intersection over the area of boxes1 - this is commonly
    used to determine if detections are within crowd ignore region.

    :param bboxes1: (N, 4) array of boxes
    :param bboxes2: (M, 4) array of boxes
    :param box_format: 'xywh' or 'x0y0x1y1'
    :param do_ioa: if True, return intersection / area(bboxes1) instead of IoU
    :return: (N, M) array of IoU (or IoA) scores
    """
    # Bug fix: the original used substring tests (`box_format in "xywh"`),
    # which silently accepted invalid values such as "x" or "0y0" instead of
    # raising. Exact equality is used instead.
    if box_format == "xywh":
        # layout: (x0, y0, w, h) -> convert to corner format.
        # deepcopy so the caller's arrays are not mutated in place.
        bboxes1 = deepcopy(bboxes1)
        bboxes2 = deepcopy(bboxes2)

        bboxes1[:, 2] = bboxes1[:, 0] + bboxes1[:, 2]
        bboxes1[:, 3] = bboxes1[:, 1] + bboxes1[:, 3]
        bboxes2[:, 2] = bboxes2[:, 0] + bboxes2[:, 2]
        bboxes2[:, 3] = bboxes2[:, 1] + bboxes2[:, 3]
    elif box_format != "x0y0x1y1":
        raise TrackEvalException("box_format %s is not implemented" % box_format)

    # layout: (x0, y0, x1, y1)
    # Broadcast to all (N, M) pairs; overlap width/height are clamped at 0.
    min_ = np.minimum(bboxes1[:, np.newaxis, :], bboxes2[np.newaxis, :, :])
    max_ = np.maximum(bboxes1[:, np.newaxis, :], bboxes2[np.newaxis, :, :])
    intersection = np.maximum(min_[..., 2] - max_[..., 0], 0) * np.maximum(
        min_[..., 3] - max_[..., 1], 0
    )
    area1 = (bboxes1[..., 2] - bboxes1[..., 0]) * (
        bboxes1[..., 3] - bboxes1[..., 1]
    )

    if do_ioa:
        # IoA: intersection over area of bboxes1 only. Rows with degenerate
        # (zero-area) boxes1 stay 0 to avoid division by zero.
        ioas = np.zeros_like(intersection)
        valid_mask = area1 > 0 + np.finfo("float").eps
        ioas[valid_mask, :] = (
            intersection[valid_mask, :] / area1[valid_mask][:, np.newaxis]
        )

        return ioas
    else:
        area2 = (bboxes2[..., 2] - bboxes2[..., 0]) * (
            bboxes2[..., 3] - bboxes2[..., 1]
        )
        union = area1[:, np.newaxis] + area2[np.newaxis, :] - intersection
        # Zero out pairs involving degenerate boxes and guard the division
        # against a zero (or negative) union.
        intersection[area1 <= 0 + np.finfo("float").eps, :] = 0
        intersection[:, area2 <= 0 + np.finfo("float").eps] = 0
        intersection[union <= 0 + np.finfo("float").eps] = 0
        union[union <= 0 + np.finfo("float").eps] = 1
        ious = intersection / union
        return ious
328
+
329
@staticmethod
def _calculate_euclidean_similarity(dets1, dets2, zero_distance=2.0):
    """Convert pairwise euclidean distances between two sets of detections into
    similarity scores in [0, 1] via: sim = max(0, 1 - dist / zero_distance).
    The default zero_distance of 2.0, corresponds to the default used in MOT15_3D, such that a 0.5 similarity
    threshold corresponds to a 1m distance threshold for TPs.
    """
    # Broadcast (N, 1, D) against (1, M, D) to get an (N, M) distance matrix.
    pairwise_dist = np.linalg.norm(
        dets1[:, np.newaxis] - dets2[np.newaxis, :], axis=2
    )
    # Linearly map distance to similarity, clamping negatives to zero.
    return np.maximum(0, 1 - pairwise_dist / zero_distance)
339
+
340
@staticmethod
def _check_unique_ids(data, after_preproc=False):
    """Check the requirement that the tracker_ids and gt_ids are unique per timestep.

    :param data: dict with "gt_ids", "tracker_ids" (lists over timesteps of 1D id arrays) and "seq" (sequence name)
    :param after_preproc: if True, append a note to the error message explaining that the
        failure happened after preprocessing (ids may differ from the input files)
    :raises TrackEvalException: if any id appears more than once within a single timestep
    """
    # Bug fix: the original appended this note to `exc_str_init` AFTER the
    # final message had already been built, so it never reached the raised
    # exception. It is now appended to the message itself.
    after_preproc_note = (
        "\n Note that this error occurred after preprocessing (but not before), "
        "so ids may not be as in file, and something seems wrong with preproc."
    )
    gt_ids = data["gt_ids"]
    tracker_ids = data["tracker_ids"]
    for t, (gt_ids_t, tracker_ids_t) in enumerate(zip(gt_ids, tracker_ids)):
        # Run the identical duplicate check for both sources, varying only
        # the message prefix (the original duplicated this code verbatim).
        for ids_t, who in (
            (tracker_ids_t, "Tracker predicts"),
            (gt_ids_t, "Ground-truth has"),
        ):
            if len(ids_t) == 0:
                continue
            unique_ids, counts = np.unique(ids_t, return_counts=True)
            if np.max(counts) == 1:
                continue
            duplicate_ids = unique_ids[counts > 1]
            exc_str_init = (
                "%s the same ID more than once in a single timestep "
                "(seq: %s, frame: %i, ids:"
            ) % (who, data["seq"], t + 1)
            exc_str = (
                " ".join([exc_str_init] + [str(d) for d in duplicate_ids]) + ")"
            )
            if after_preproc:
                exc_str += after_preproc_note
            raise TrackEvalException(exc_str)
source_code/sam3/sam3/eval/hota_eval_toolkit/trackeval/metrics/__init__.py ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # flake8: noqa
2
+
3
+ from .count import Count
4
+ from .hota import HOTA
source_code/sam3/sam3/eval/postprocessors.py ADDED
@@ -0,0 +1,648 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
2
+
3
+ """Postprocessors class to transform MDETR output according to the downstream task"""
4
+
5
+ import dataclasses
6
+ import logging
7
+ from collections import defaultdict
8
+ from typing import Dict, List, Optional
9
+
10
+ import numpy as np
11
+ import torch
12
+ from sam3.model import box_ops
13
+ from sam3.model.data_misc import BatchedInferenceMetadata, interpolate
14
+ from sam3.train.masks_ops import rle_encode, robust_rle_encode
15
+ from torch import nn
16
+
17
+
18
class PostProcessNullOp(nn.Module):
    """No-op postprocessor: passes the model's `find_stages` through unchanged.

    Useful as a drop-in replacement when no postprocessing is wanted.
    """

    def __init__(self, **kwargs):
        # Bug fix: the original called `super(PostProcessNullOp).__init__()`,
        # which initializes the unbound `super` proxy itself and never runs
        # `nn.Module.__init__`, leaving the module without `_parameters`,
        # `_buffers`, etc. and breaking any nn.Module API call.
        super().__init__()

    def forward(self, input):
        # Intentionally does nothing.
        pass

    def process_results(self, **kwargs):
        """Return the raw `find_stages` untouched, ignoring all other kwargs."""
        return kwargs["find_stages"]
28
+
29
+
30
class PostProcessImage(nn.Module):
    """This module converts the model's output into the format expected by the coco api"""

    def __init__(
        self,
        max_dets_per_img: int,
        iou_type: str = "bbox",
        to_cpu: bool = True,
        use_original_ids: bool = False,
        use_original_sizes_box: bool = False,
        use_original_sizes_mask: bool = False,
        convert_mask_to_rle: bool = False,
        always_interpolate_masks_on_gpu: bool = True,
        use_presence: bool = True,
        detection_threshold: float = -1.0,
    ) -> None:
        super().__init__()
        # Cap on detections kept per image in `process_results` (<= 0 disables pruning).
        self.max_dets_per_img = max_dets_per_img
        # "segm" enables mask postprocessing; any other value processes boxes only.
        self.iou_type = iou_type
        # Move postprocessed tensors to CPU before returning.
        self.to_cpu = to_cpu
        # Encode binary masks as RLE instead of returning dense tensors.
        self.convert_mask_to_rle = convert_mask_to_rle
        # Force mask interpolation on the GPU holding `target_sizes` (asserts CUDA).
        self.always_interpolate_masks_on_gpu = always_interpolate_masks_on_gpu

        # Multiply per-query probabilities by the decoder presence score.
        self.use_presence = use_presence
        # Score threshold for keeping detections; <= 0 keeps everything.
        self.detection_threshold = detection_threshold
        # Report original image/category ids (for COCO-style eval) vs. internal ids.
        self.use_original_ids = use_original_ids
        # Scale boxes/masks to original image sizes vs. normalized unit sizes.
        self.use_original_sizes_box = use_original_sizes_box
        self.use_original_sizes_mask = use_original_sizes_mask

    @torch.no_grad()
    def forward(
        self,
        outputs,
        target_sizes_boxes,
        target_sizes_masks,
        forced_labels=None,
        consistent: bool = False,
        ret_tensordict: bool = False,  # This is experimental
    ):
        """Perform the computation
        Parameters:
            outputs: raw outputs of the model
            target_sizes_boxes: tensor of dimension [batch_size x 2] containing the size of each images of the batch
                For evaluation, this must be the original image size (before any data augmentation)
                For visualization, this should be the image size after data augment, but before padding
            target_sizes_masks: same but used to resize masks
            forced_labels: tensor of dimension [batch_size] containing the label to force for each image of the batch
                This is useful when evaluating the model using standard metrics (eg on COCO, LVIS). In that case,
                we query the model with every possible class label, so we when we pass the predictions to the evaluator,
                we want to make sure that the predicted "class" matches the one that was queried.
            consistent: whether all target sizes are equal
            ret_tensordict: Experimental argument. If true, return a tensordict.TensorDict instead of a list of dictionaries for easier manipulation.
        """
        if ret_tensordict:
            # TensorDict output only supports uniformly-shaped outputs and no
            # score-threshold filtering (which would make shapes ragged).
            assert (
                consistent is True
            ), "We don't support returning TensorDict if the outputs have different shapes"  # NOTE: It's possible but we don't support it.
            assert self.detection_threshold <= 0.0, "TODO: implement?"
            try:
                from tensordict import TensorDict
            except ImportError:
                logging.info(
                    "tensordict is not installed. Install by running `pip install tensordict --no-deps`. Falling back by setting `ret_tensordict=False`"
                )
                ret_tensordict = False

        out_bbox = outputs["pred_boxes"] if "pred_boxes" in outputs else None
        out_logits = outputs["pred_logits"]
        # Masks are only postprocessed when the module was configured for segmentation.
        pred_masks = outputs["pred_masks"] if self.iou_type == "segm" else None
        out_probs = out_logits.sigmoid()
        if self.use_presence:
            # Modulate per-query class probabilities by the global presence score.
            presence_score = outputs["presence_logit_dec"].sigmoid().unsqueeze(1)
            out_probs = out_probs * presence_score

        assert target_sizes_boxes.shape[1] == 2
        assert target_sizes_masks.shape[1] == 2
        batch_size = target_sizes_boxes.shape[0]

        boxes, scores, labels, keep = self._process_boxes_and_labels(
            target_sizes_boxes, forced_labels, out_bbox, out_probs
        )
        assert boxes is None or len(boxes) == batch_size
        out_masks = self._process_masks(
            target_sizes_masks, pred_masks, consistent=consistent, keep=keep
        )
        # Free the raw mask logits early; the interpolated masks can be large.
        del pred_masks

        if boxes is None:
            # Mask-only output: pad boxes/scores/labels with None placeholders
            # so the per-image result dicts keep a uniform set of keys.
            assert out_masks is not None
            assert not ret_tensordict, "We don't support returning TensorDict if the output does not contain boxes"
            B = len(out_masks)
            boxes = [None] * B
            scores = [None] * B
            labels = [None] * B

        results = {
            "scores": scores,
            "labels": labels,
            "boxes": boxes,
        }
        if out_masks is not None:
            if self.convert_mask_to_rle:
                results.update(masks_rle=out_masks)
            else:
                results.update(masks=out_masks)

        if ret_tensordict:
            results = TensorDict(results).auto_batch_size_()
            if self.to_cpu:
                results = results.cpu()
        else:
            # Convert a dictionary of lists/tensors to a list (one entry per
            # image) of dictionaries.
            results = [
                dict(zip(results.keys(), res_tuple))
                for res_tuple in zip(*results.values())
            ]

        return results

    def _process_masks(self, target_sizes, pred_masks, consistent=True, keep=None):
        # Upsample mask logits to the per-image target sizes and binarize at 0.5.
        # Returns None when masks are disabled; otherwise a tensor (consistent
        # path) or a per-image list (ragged path).
        if pred_masks is None:
            return None
        if self.always_interpolate_masks_on_gpu:
            gpu_device = target_sizes.device
            assert gpu_device.type == "cuda"
            pred_masks = pred_masks.to(device=gpu_device)
        if consistent:
            assert keep is None, "TODO: implement?"
            # All masks should have the same shape, expected when processing a batch of size 1
            target_size = target_sizes.unique(dim=0)
            assert target_size.size(0) == 1, "Expecting all target sizes to be equal"
            out_masks = (
                interpolate(
                    pred_masks,
                    target_size.squeeze().tolist(),
                    mode="bilinear",
                    align_corners=False,
                ).sigmoid()
                > 0.5
            )
            if self.convert_mask_to_rle:
                raise RuntimeError("TODO: implement?")
            if self.to_cpu:
                out_masks = out_masks.cpu()
        else:
            # Each slot is reassigned below, so the shared [] placeholders are harmless.
            out_masks = [[]] * len(pred_masks)

            assert keep is None or len(keep) == len(pred_masks)
            for i, mask in enumerate(pred_masks):
                h, w = target_sizes[i]
                if keep is not None:
                    # Drop masks filtered out by the detection threshold.
                    mask = mask[keep[i]]
                # Use the GPU version first; fall back to CPU if it fails
                # (e.g. on out-of-memory for very large target sizes).
                try:
                    interpolated = (
                        interpolate(
                            mask.unsqueeze(1),
                            (h, w),
                            mode="bilinear",
                            align_corners=False,
                        ).sigmoid()
                        > 0.5
                    )
                except Exception as e:
                    logging.info("Issue found, reverting to CPU mode!")
                    mask_device = mask.device
                    mask = mask.cpu()
                    interpolated = (
                        interpolate(
                            mask.unsqueeze(1),
                            (h, w),
                            mode="bilinear",
                            align_corners=False,
                        ).sigmoid()
                        > 0.5
                    )
                    interpolated = interpolated.to(mask_device)

                if self.convert_mask_to_rle:
                    out_masks[i] = robust_rle_encode(interpolated.squeeze(1))
                else:
                    out_masks[i] = interpolated
                if self.to_cpu:
                    out_masks[i] = out_masks[i].cpu()

        return out_masks

    def _process_boxes_and_labels(
        self, target_sizes, forced_labels, out_bbox, out_probs
    ):
        # Scale boxes to target sizes, pick per-query scores/labels and
        # optionally filter by the detection threshold. Returns
        # (boxes, scores, labels, keep); all None when there are no boxes.
        if out_bbox is None:
            return None, None, None, None
        assert len(out_probs) == len(target_sizes)
        if self.to_cpu:
            out_probs = out_probs.cpu()
        scores, labels = out_probs.max(-1)
        if forced_labels is None:
            # Class-agnostic mode: every prediction gets the dummy label 1.
            labels = torch.ones_like(labels)
        else:
            # Report the label that was queried, not the argmax.
            labels = forced_labels[:, None].expand_as(labels)

        # convert to [x0, y0, x1, y1] format
        boxes = box_ops.box_cxcywh_to_xyxy(out_bbox)

        # Rescale normalized boxes to per-image (w, h, w, h).
        img_h, img_w = target_sizes.unbind(1)
        scale_fct = torch.stack([img_w, img_h, img_w, img_h], dim=1)
        boxes = boxes * scale_fct[:, None, :]

        if self.to_cpu:
            boxes = boxes.cpu()

        keep = None
        if self.detection_threshold > 0:
            # Filter out the boxes with scores below the detection threshold
            keep = scores > self.detection_threshold
            assert len(keep) == len(boxes) == len(scores) == len(labels)

            boxes = [b[k.to(b.device)] for b, k in zip(boxes, keep)]
            scores = [s[k.to(s.device)] for s, k in zip(scores, keep)]
            labels = [l[k.to(l.device)] for l, k in zip(labels, keep)]

        return boxes, scores, labels, keep

    def process_results(
        self, find_stages, find_metadatas: List[BatchedInferenceMetadata], **kwargs
    ):
        # Run the postprocessor over every stage output, merge detections that
        # belong to the same image (across per-class queries), then prune each
        # image to at most `max_dets_per_img` detections by score.
        # Returns: dict mapping image id -> result dict (scores/labels/boxes[/masks]).
        if find_stages.loss_stages is not None:
            find_metadatas = [find_metadatas[i] for i in find_stages.loss_stages]
        assert len(find_stages) == len(find_metadatas)
        results = {}
        for outputs, meta in zip(find_stages, find_metadatas):
            # Unit sizes keep coordinates normalized when original sizes are not requested.
            img_size_for_boxes = (
                meta.original_size
                if self.use_original_sizes_box
                else torch.ones_like(meta.original_size)
            )
            img_size_for_masks = (
                meta.original_size
                if self.use_original_sizes_mask
                else torch.ones_like(meta.original_size)
            )
            detection_results = self(
                outputs,
                img_size_for_boxes,
                img_size_for_masks,
                forced_labels=(
                    meta.original_category_id if self.use_original_ids else None
                ),
            )
            ids = (
                meta.original_image_id if self.use_original_ids else meta.coco_image_id
            )
            assert len(detection_results) == len(ids)
            for img_id, result in zip(ids, detection_results):
                if img_id.item() not in results:
                    results[img_id.item()] = result
                else:
                    # Same image seen again (e.g. queried with another class):
                    # concatenate tensors / extend lists key-by-key.
                    assert set(results[img_id.item()].keys()) == set(result.keys())
                    for k in result.keys():
                        if isinstance(result[k], torch.Tensor):
                            results[img_id.item()][k] = torch.cat(
                                [results[img_id.item()][k], result[k]], dim=0
                            )
                        elif isinstance(result[k], list):
                            results[img_id.item()][k] += result[k]
                        else:
                            raise NotImplementedError(
                                f"Unexpected type {type(result[k])} in result."
                            )
        # Prune the results to the max number of detections per image.
        for img_id, result in results.items():
            if (
                self.max_dets_per_img > 0
                and len(result["scores"]) > self.max_dets_per_img
            ):
                _, topk_indexes = torch.topk(
                    result["scores"], self.max_dets_per_img, dim=0
                )
                if self.to_cpu:
                    topk_indexes = topk_indexes.cpu()
                for k in result.keys():
                    # Lists (e.g. RLE masks) are indexed in Python; tensors are
                    # gathered on the index tensor's device.
                    if isinstance(results[img_id][k], list):
                        results[img_id][k] = [
                            results[img_id][k][i] for i in topk_indexes.tolist()
                        ]
                    else:
                        results[img_id][k] = results[img_id][k].to(topk_indexes.device)[
                            topk_indexes
                        ]

        return results
320
+ return results
321
+
322
+
323
+ class PostProcessAPIVideo(PostProcessImage):
324
+ """This module converts the video model's output into the format expected by the YT-VIS api"""
325
+
326
+ def __init__(
327
+ self,
328
+ *args,
329
+ to_cpu: bool = True,
330
+ convert_mask_to_rle: bool = False,
331
+ always_interpolate_masks_on_gpu: bool = True,
332
+ prob_thresh: float = 0.5,
333
+ use_presence: bool = False,
334
+ **kwargs,
335
+ ):
336
+ super().__init__(
337
+ *args,
338
+ # Here we always set `convert_mask_to_rle=False` in the base `PostProcessAPI` class
339
+ # (so that its `_process_masks` won't return a list of RLEs). If we want to return
340
+ # RLEs for video masklets, we handle it in this `PostProcessAPIVideo` class instead.
341
+ convert_mask_to_rle=False,
342
+ # Here we always set `to_cpu=False` in the base `PostProcessAPI` class (so that
343
+ # the interpolated masks won't be automatically moved back to CPU). We will handle
344
+ # it in this `PostProcessAPIVideo` class instead.
345
+ always_interpolate_masks_on_gpu=always_interpolate_masks_on_gpu,
346
+ use_presence=use_presence,
347
+ **kwargs,
348
+ )
349
+ # Expected keys in the output dict to postprocess
350
+ self.EXPECTED_KEYS = [
351
+ "pred_logits",
352
+ "pred_boxes",
353
+ "pred_masks",
354
+ ]
355
+ # Whether to post-process video masklets (under packed representation) into RLE format
356
+ self.convert_mask_to_rle_for_video = convert_mask_to_rle
357
+ self.to_cpu_for_video = to_cpu
358
+ self.prob_thresh = prob_thresh
359
+
360
+ def process_results(
361
+ self, find_stages, find_metadatas: List[BatchedInferenceMetadata], **kwargs
362
+ ):
363
+ """
364
+ Tracking Postprocessor for SAM 3 video model.
365
+ This function takes in the output of the SAM 3 video model and processes it to extract all the tracklet predictions.
366
+ Args:
367
+ find_stages: A list of tensors representing the output of the SAM 3 video model.
368
+ find_metadatas: A list of BatchedInferenceMetadata objects containing metadata about each frame.
369
+ **kwargs: Additional keyword arguments.
370
+ Returns:
371
+ A dictionary of predcitions with video_id as key.
372
+ """
373
+
374
+ # Import tensordict here to avoid global dependency.
375
+ try:
376
+ from tensordict import TensorDict
377
+ except ImportError as e:
378
+ logging.error(
379
+ "tensordict is not installed, please install by running `pip install tensordict --no-deps`"
380
+ )
381
+ raise e
382
+ # Notes and assumptions:
383
+ # 1- This postprocessor assumes results only for a single video.
384
+ # 2- There are N stage outputs corresponding to N video frames
385
+ # 3- Each stage outputs contains PxQ preds, where P is number of prompts and Q is number of object queries. The output should also contain the tracking object ids corresponding to each object query.
386
+ # 4- The tracking object id has a default value of -1, indicating that the object query is not tracking any object in the frame, and hence its predictions can be ingored for a given frame.
387
+ # 5- Some objects may be tracked in a subset of frames only. So, we first extract the predictions in a packed representation (for efficient postprocessing -- specially memory)
388
+ # and then we convert the packed representation into a padded one, where we zero pad boxes/masks for objects that are not tracked in some frames.
389
+ # 6- We refer to objects by an object id, which is a tuple (prompt_idx, obj_id)
390
+
391
+ assert len(find_stages) > 0, "There is nothing to postprocess?"
392
+ PROMPT_AXIS, OBJ_QUERY_AXIS = (0, 1)
393
+ NO_OBJ_ID = -1
394
+ # Maps object ID -> [indices in packed tensor]
395
+ tracked_objects_packed_idx = defaultdict(list)
396
+ # Maps object ID -> [indices in padded tensor (abs frame index)]
397
+ tracked_objects_frame_idx = defaultdict(list)
398
+ total_num_preds = 0
399
+ # This will hold the packed representation of predictions.
400
+ vid_preds_packed: List[TensorDict] = []
401
+ vid_masklets_rle_packed: List[Optional[Dict]] = []
402
+ video_id = -1 # We assume single video postprocessing, this ID should be unique in the datapoint.
403
+
404
+ for frame_idx, (frame_outs, meta) in enumerate(
405
+ zip(find_stages, find_metadatas)
406
+ ):
407
+ # only store keys we need to extract the results
408
+ frame_outs_td = TensorDict(
409
+ {k: frame_outs[k] for k in self.EXPECTED_KEYS}
410
+ ).auto_batch_size_() # Shape is [P,Q,...]
411
+ meta_td = TensorDict(
412
+ dataclasses.asdict(meta)
413
+ ).auto_batch_size_() # Shape is [P,...]
414
+ unique_vid_id = meta.original_image_id.unique()
415
+ assert unique_vid_id.size(0) == 1
416
+ if video_id == -1:
417
+ video_id = unique_vid_id.item()
418
+ else:
419
+ assert (
420
+ video_id == unique_vid_id.item()
421
+ ), "We can only postprocess one video per datapoint"
422
+ # keeping track of which objects appear in the current frame
423
+ obj_ids_per_frame = frame_outs["pred_object_ids"]
424
+ assert obj_ids_per_frame.size(-1) == frame_outs["pred_logits"].size(-2)
425
+ if self.prob_thresh is not None:
426
+ # only keep the predictions on this frame with probability above the threshold
427
+ # (remove those predictions during the keep-alive period of a tracking query,
428
+ # where its "pred_object_ids" is still the tracked object ID rather than -1)
429
+ pred_probs = frame_outs["pred_logits"].sigmoid().squeeze(-1)
430
+ obj_ids_per_frame = torch.where(
431
+ pred_probs >= self.prob_thresh, obj_ids_per_frame, NO_OBJ_ID
432
+ )
433
+ tracked_obj_ids_idx = torch.where(obj_ids_per_frame != NO_OBJ_ID)
434
+ # Object id is a tuple of (prompt_idx, obj_id). This is because the model can assign same obj_id for two different prompts.
435
+ tracked_obj_ids = [
436
+ (p_id.item(), obj_ids_per_frame[p_id, q_id].item())
437
+ for p_id, q_id in zip(
438
+ tracked_obj_ids_idx[PROMPT_AXIS],
439
+ tracked_obj_ids_idx[OBJ_QUERY_AXIS],
440
+ )
441
+ ]
442
+ if len(tracked_obj_ids) == 0:
443
+ continue
444
+ # For each object, we keep track of the packed and padded (frame index) indices
445
+ for oid in tracked_obj_ids:
446
+ tracked_objects_packed_idx[oid].append(total_num_preds)
447
+ tracked_objects_frame_idx[oid].append(frame_idx)
448
+ total_num_preds += 1
449
+
450
+ # Since we have P*Q masks per frame, mask interpolation is the GPU memory bottleneck or time bottleneck in case of cpu processing.
451
+ # Instead, we first extract results only for tracked objects, reducing the number of masks to K = sum_i(tracked_objs_per_ith_prompt), hopefully <<< P*Q
452
+ tracked_objs_outs_td = frame_outs_td[
453
+ tracked_obj_ids_idx
454
+ ] # [P,Q,...] --> [K,...]
455
+ meta_td = meta_td[tracked_obj_ids_idx[PROMPT_AXIS].cpu()]
456
+ if self.always_interpolate_masks_on_gpu:
457
+ gpu_device = meta_td["original_size"].device
458
+ assert gpu_device.type == "cuda"
459
+ tracked_objs_outs_td = tracked_objs_outs_td.to(device=gpu_device)
460
+ frame_results_td = self(
461
+ tracked_objs_outs_td.unsqueeze(1),
462
+ (
463
+ meta_td["original_size"]
464
+ if self.use_original_sizes
465
+ else torch.ones_like(meta_td["original_size"])
466
+ ),
467
+ forced_labels=(
468
+ meta_td["original_category_id"] if self.use_original_ids else None
469
+ ),
470
+ consistent=True,
471
+ ret_tensordict=True,
472
+ ).squeeze(1)
473
+ del tracked_objs_outs_td
474
+
475
+ # Optionally, remove "masks" from output tensor dict and directly encode them
476
+ # to RLE format under packed representations
477
+ if self.convert_mask_to_rle_for_video:
478
+ interpolated_binary_masks = frame_results_td.pop("masks")
479
+ rle_list = rle_encode(interpolated_binary_masks, return_areas=True)
480
+ vid_masklets_rle_packed.extend(rle_list)
481
+ # Optionally, move output TensorDict to CPU (do this after RLE encoding step above)
482
+ if self.to_cpu_for_video:
483
+ frame_results_td = frame_results_td.cpu()
484
+ vid_preds_packed.append(frame_results_td)
485
+
486
+ if len(vid_preds_packed) == 0:
487
+ logging.debug(f"Video {video_id} has no predictions")
488
+ return {video_id: []}
489
+
490
+ vid_preds_packed = torch.cat(vid_preds_packed, dim=0)
491
+ ############### Construct a padded representation of the predictions ###############
492
+ num_preds = len(tracked_objects_packed_idx)
493
+ num_frames = len(find_stages)
494
+ # We zero pad any missing prediction
495
+ # NOTE: here, we also have padded tensors for "scores" and "labels", but we overwrite them later.
496
+ padded_frames_results = TensorDict(
497
+ {
498
+ k: torch.zeros(
499
+ num_preds, num_frames, *v.shape[1:], device=v.device, dtype=v.dtype
500
+ )
501
+ for k, v in vid_preds_packed.items()
502
+ },
503
+ batch_size=[
504
+ num_preds,
505
+ num_frames,
506
+ ],
507
+ )
508
+ padded_frames_results["scores"][...] = -1e8 # a very low score for empty object
509
+ # Track scores and labels of each pred tracklet, only for frames where the model was able to track that object
510
+ tracklet_scores = []
511
+ tracklet_labels = []
512
+ # Optionally, fill the list of RLEs for masklets
513
+ # note: only frames with actual predicted masks (in packed format) will be
514
+ # filled with RLEs; the rest will remains None in results["masks_rle"]
515
+ if self.convert_mask_to_rle_for_video:
516
+ vid_masklets_rle_padded = [[None] * num_frames for _ in range(num_preds)]
517
+ for o_idx, oid in enumerate(tracked_objects_packed_idx):
518
+ oid2packed_idx = tracked_objects_packed_idx[oid]
519
+ oid2padded_idx = tracked_objects_frame_idx[oid]
520
+ obj_packed_results = vid_preds_packed[oid2packed_idx]
521
+ padded_frames_results[o_idx][oid2padded_idx] = obj_packed_results
522
+ if self.convert_mask_to_rle_for_video:
523
+ for packed_idx, padded_idx in zip(oid2packed_idx, oid2padded_idx):
524
+ vid_masklets_rle_padded[o_idx][padded_idx] = (
525
+ vid_masklets_rle_packed[packed_idx]
526
+ )
527
+ # NOTE: We need a single confidence score per tracklet for the mAP metric.
528
+ # We use the average confidence score across time. (How does this impact AP?)
529
+ tracklet_scores.append(obj_packed_results["scores"].mean())
530
+ # We also need to have a unique category Id per tracklet.
531
+ # This is not a problem for phrase AP, however, for mAP we do majority voting across time.
532
+ tracklet_labels.append(obj_packed_results["labels"].mode()[0])
533
+
534
+ results = padded_frames_results.to_dict()
535
+ results["scores"] = torch.stack(tracklet_scores, dim=0)
536
+ results["labels"] = torch.stack(tracklet_labels, dim=0)
537
+ if self.convert_mask_to_rle_for_video:
538
+ results["masks_rle"] = vid_masklets_rle_padded
539
+ # we keep the frame-level scores since it's needed by some evaluation scripts
540
+ results["per_frame_scores"] = padded_frames_results["scores"]
541
+
542
+ return {video_id: results}
543
+
544
+
545
class PostProcessTracking(PostProcessImage):
    """Convert tracking model outputs into the format expected by the coco api.

    Results are keyed by ``(media_id, object_id, frame_index)`` tuples so that
    the per-frame detection of each tracked object can be looked up directly.
    """

    def __init__(
        self,
        max_dets_per_img: int,
        iou_type="bbox",
        force_single_mask: bool = False,
        **kwargs,
    ) -> None:
        # Generic detection post-processing setup is handled by the parent.
        super().__init__(max_dets_per_img=max_dets_per_img, iou_type=iou_type, **kwargs)
        # When True, reduce the per-sample mask set to the single best-scoring mask.
        self.force_single_mask = force_single_mask

    def process_results(
        self, find_stages, find_metadatas: BatchedInferenceMetadata, **kwargs
    ):
        assert len(find_stages) == len(find_metadatas)
        results = {}
        for outputs, meta in zip(find_stages, find_metadatas):
            if self.force_single_mask:
                # For each sample, keep only the mask of the highest-scoring query.
                per_query_scores = outputs["pred_logits"].max(-1)[0]
                best_masks = []
                for sample_scores, sample_masks in zip(
                    per_query_scores, outputs["pred_masks"]
                ):
                    best_idx = sample_scores.max(0)[1]
                    best_masks.append(sample_masks[best_idx])
                outputs["pred_masks"] = torch.stack(best_masks, 0).unsqueeze(1)
            detection_results = self(outputs, meta.original_size, consistent=False)
            assert len(detection_results) == len(meta.coco_image_id)
            for media_id, object_id, frame_index, result in zip(
                meta.original_image_id,
                meta.object_id,
                meta.frame_index,
                detection_results,
            ):
                key = (media_id.item(), object_id.item(), frame_index.item())
                results[key] = result
        return results
585
+
586
+
587
class PostProcessCounting(nn.Module):
    """Convert the model's output into per-image object counts for counting tasks."""

    def __init__(
        self,
        use_original_ids: bool = False,
        threshold: float = 0.5,
        use_presence: bool = False,
    ) -> None:
        """
        Args:
            use_original_ids: whether to use the original image ids or the coco ids
            threshold: threshold for counting (values above this are counted)
            use_presence: if True, scale per-query scores by the decoder presence score
        """
        super().__init__()
        self.use_original_ids = use_original_ids
        self.threshold = threshold
        self.use_presence = use_presence

    def forward(self, outputs, target_sizes):
        """Compute one count per image from raw model outputs.

        Parameters:
            outputs: raw outputs of the model
            target_sizes: tensor of dimension [batch_size x 2] containing the size of each images of the batch
        """
        # Per-query confidence: sigmoid over the classification logits.
        query_scores = torch.sigmoid(outputs["pred_logits"]).squeeze(-1)  # [B, N]
        if self.use_presence:
            presence = outputs["presence_logit_dec"].sigmoid()
            if presence.ndim == 1:
                presence = presence.unsqueeze(1)  # [B, 1]
            query_scores = query_scores * presence  # [B, N]

        # Each query whose score exceeds the threshold counts as one object.
        per_image_counts = (query_scores > self.threshold).float().sum(dim=1)

        assert len(per_image_counts) == len(target_sizes)
        return [{"count": c.item()} for c in per_image_counts]

    @torch.no_grad()
    def process_results(
        self, find_stages, find_metadatas: List[BatchedInferenceMetadata], **kwargs
    ):
        assert len(find_stages) == len(find_metadatas)
        results = {}
        for outputs, meta in zip(find_stages, find_metadatas):
            batch_results = self(outputs, meta.original_size)
            image_ids = (
                meta.original_image_id if self.use_original_ids else meta.coco_image_id
            )
            assert len(batch_results) == len(image_ids)
            results.update(
                {
                    img_id.item(): result
                    for img_id, result in zip(image_ids, batch_results)
                }
            )
        return results
source_code/sam3/sam3/eval/saco_veval_evaluators.py ADDED
@@ -0,0 +1,838 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
2
+ import json
3
+ import os
4
+ import tempfile
5
+ from collections import defaultdict
6
+ from typing import Dict, Optional, Sequence, Tuple
7
+
8
+ import numpy as np
9
+ import pycocotools.mask
10
+ from sam3.eval.cgf1_eval import CGF1_METRICS
11
+ from sam3.eval.conversion_util import (
12
+ convert_ytbvis_to_cocovid_gt,
13
+ convert_ytbvis_to_cocovid_pred,
14
+ )
15
+ from sam3.eval.hota_eval_toolkit.run_ytvis_eval import run_ytvis_eval
16
+ from sam3.eval.teta_eval_toolkit import config, Evaluator, metrics
17
+ from sam3.eval.teta_eval_toolkit.datasets import COCO, TAO
18
+ from sam3.eval.ytvis_coco_wrapper import YTVIS
19
+ from sam3.eval.ytvis_eval import VideoDemoF1Eval, YTVISeval
20
+ from sam3.train.nms_helper import process_frame_level_nms, process_track_level_nms
21
+
22
+
23
def _get_metric_index(metric_name: str, iou_threshold: Optional[float] = None) -> int:
    """
    Find the index of a metric in CGF1_METRICS by name and IoU threshold.

    Args:
        metric_name: Name of the metric (e.g., "cgF1", "precision", "recall")
        iou_threshold: IoU threshold (None for average over 0.5:0.95, or specific value like 0.5, 0.75)

    Returns:
        Index of the metric in CGF1_METRICS

    Raises:
        ValueError: If metric not found
    """
    # Lazily scan the metric table and stop at the first exact match.
    match_idx = next(
        (
            idx
            for idx, metric in enumerate(CGF1_METRICS)
            if metric.name == metric_name and metric.iou_threshold == iou_threshold
        ),
        None,
    )
    if match_idx is None:
        raise ValueError(
            f"Metric '{metric_name}' with IoU threshold {iou_threshold} not found in CGF1_METRICS"
        )
    return match_idx
43
+
44
+
45
class BasePredFileEvaluator:
    """A base class for evaluating a prediction file.

    Subclasses implement ``evaluate(pred_file)`` and return a tuple of
    ``(results, video_np_level_results)``: a dict of aggregate metric values
    and a dict of per-(video, noun-phrase) statistics (may be empty).
    """

    pass
49
+
50
+
51
class YTVISPredFileEvaluator(BasePredFileEvaluator):
    """Evaluate class mAP for YT-VIS prediction files."""

    def __init__(
        self,
        gt_ann_file: str,
        dataset_name: str = "video",
        iou_types: Optional[Sequence[str]] = None,
    ):
        """
        Args:
            gt_ann_file: path to the official YT-VIS format GT annotation JSON
            dataset_name: prefix used in the returned metric names
            iou_types: subset of ["bbox", "segm"]; defaults to both
        """
        self.gt_ann_file = gt_ann_file
        self.dataset_name = dataset_name
        self.iou_types = list(iou_types) if iou_types is not None else ["bbox", "segm"]
        assert all(iou_type in ["bbox", "segm"] for iou_type in self.iou_types)

    # NOTE: the original annotation said `-> Dict[str, float]`, but this method
    # returns a (results, video_np_level_results) tuple like its siblings.
    def evaluate(self, pred_file: str) -> Tuple[Dict[str, float], Dict]:
        """Run YT-VIS mAP evaluation on `pred_file`.

        Returns:
            (results, video_np_level_results): aggregate metrics keyed by
            "{dataset_name}_{bbox|mask}_mAP_50_95", and an (empty) per-video-NP dict.
        """
        # use our internal video evaluation toolkit for YT-VIS pred file
        # (i.e. the same one we're using for video phrase AP)
        results = {}
        use_cats = True  # YT-VIS mAP evaluation uses categories
        ytvisGT = YTVIS(self.gt_ann_file, ignore_gt_cats=not use_cats)
        # the original YT-VIS GT annotations have uncompressed RLEs ("counts" is an integer list)
        # rather than compressed RLEs ("counts" is a string), so we first convert them here.
        if "segm" in self.iou_types:
            for ann in ytvisGT.dataset["annotations"]:
                ann["segmentations"] = [
                    _compress_rle(rle) for rle in ann["segmentations"]
                ]

        with open(pred_file) as f:
            dt = json.load(f)
        # Our prediction file saves "video_id" and absolute (unnormalized) boxes.
        # Note that we should use the official (original) YT-VIS annotations (i.e. the one
        # saved via "scripts/datasets/training/ytvis_split.py", instead of the one saved
        # via "scripts/api_db_to_ytvis_json.py") in this evaluator, which contain absolute
        # boxes coordinates in its GT annotations.
        for d in dt:
            d["image_id"] = d["video_id"]
        ytvisDT = ytvisGT.loadRes(dt)

        for iou_type in self.iou_types:
            ytvisEval = YTVISeval(ytvisGT, ytvisDT, iou_type)

            # set the area ranges for small, medium, and large objects (using
            # absolute pixel areas) as in the official YT-VIS evaluation toolkit:
            # https://github.com/achalddave/ytvosapi/blob/eca601117c9f86bad084cb91f1d918e9ab665a75/PythonAPI/ytvostools/ytvoseval.py#L538
            ytvisEval.params.areaRng = [
                [0**2, 1e5**2],
                [0**2, 128**2],
                [128**2, 256**2],
                [256**2, 1e5**2],
            ]
            ytvisEval.params.areaRngLbl = ["all", "small", "medium", "large"]
            ytvisEval.params.useCats = use_cats

            ytvisEval.evaluate()
            ytvisEval.accumulate()
            ytvisEval.summarize()
            result_key = f"{self.dataset_name}_{'mask' if iou_type == 'segm' else 'bbox'}_mAP_50_95"
            results[result_key] = ytvisEval.stats[0]

        # video-NP level results not supported for `YTVISPredFileEvaluator` yet
        video_np_level_results = {}
        return results, video_np_level_results
114
+
115
+
116
class VideoPhraseApEvaluator(BasePredFileEvaluator):
    """Evaluate Video Phrase AP with YT-VIS format prediction and GT files."""

    def __init__(
        self,
        gt_ann_file: str,
        dataset_name: str = "video",
        iou_types: Optional[Sequence[str]] = None,
    ):
        """
        Args:
            gt_ann_file: path to the YT-VIS format GT annotation JSON
            dataset_name: prefix used in the returned metric names
            iou_types: subset of ["bbox", "segm"]; defaults to both
        """
        self.gt_ann_file = gt_ann_file
        self.dataset_name = dataset_name
        self.iou_types = list(iou_types) if iou_types is not None else ["bbox", "segm"]
        assert all(iou_type in ["bbox", "segm"] for iou_type in self.iou_types)

    # NOTE: the original annotation said `-> Dict[str, float]`, but this method
    # returns a (results, video_np_level_results) tuple like its siblings.
    def evaluate(self, pred_file: str) -> Tuple[Dict[str, float], Dict]:
        """Run category-agnostic Phrase AP evaluation on `pred_file`.

        Returns:
            (results, video_np_level_results): aggregate phrase-AP metrics and
            an (empty) per-video-NP dict.
        """
        with open(self.gt_ann_file) as f:
            gt = json.load(f)
        with open(pred_file) as f:
            dt = json.load(f)
        # For phrase AP and demo F1 evaluation, we need to remap each pair of (video_id, category_id) to
        # a new unique video_id, so that we don't mix detections from different categories under `useCat=False`
        gt, dt = remap_video_category_pairs_to_unique_video_ids(gt, dt)
        if "segm" in self.iou_types:
            for ann in gt["annotations"]:
                ann["segmentations"] = [
                    _compress_rle(rle) for rle in ann["segmentations"]
                ]
        for d in dt:
            d["image_id"] = d["video_id"]

        results = {}
        use_cats = False  # Phrase AP evaluation does not use categories
        ytvisGT = YTVIS(annotation_file=None, ignore_gt_cats=not use_cats)
        ytvisGT.dataset = gt
        ytvisGT.createIndex()
        ytvisDT = ytvisGT.loadRes(dt)

        for iou_type in self.iou_types:
            phraseApEval = YTVISeval(ytvisGT, ytvisDT, iou_type)

            # set the area ranges for small, medium, and large objects (using
            # absolute pixel areas) as in the official YT-VIS evaluation toolkit:
            # https://github.com/achalddave/ytvosapi/blob/eca601117c9f86bad084cb91f1d918e9ab665a75/PythonAPI/ytvostools/ytvoseval.py#L538
            phraseApEval.params.areaRng = [
                [0**2, 1e5**2],
                [0**2, 128**2],
                [128**2, 256**2],
                [256**2, 1e5**2],
            ]
            phraseApEval.params.areaRngLbl = ["all", "small", "medium", "large"]
            phraseApEval.params.useCats = use_cats

            phraseApEval.evaluate()
            phraseApEval.accumulate()
            phraseApEval.summarize()
            result_prefix = f"{self.dataset_name}"
            result_prefix += f"_{'mask' if iou_type == 'segm' else 'bbox'}_phrase_ap"
            # fetch Phrase AP results from the corresponding indices in `phraseApEval.stats`
            # (see `_summarizeDets` in https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocotools/cocoeval.py)
            results[result_prefix + "_50_95"] = phraseApEval.stats[0]  # IoU=0.5:0.95
            results[result_prefix + "_50"] = phraseApEval.stats[1]  # IoU=0.5
            results[result_prefix + "_75"] = phraseApEval.stats[2]  # IoU=0.75

        # video-NP level results not supported for `VideoPhraseApEvaluator` yet
        video_np_level_results = {}
        return results, video_np_level_results
182
+
183
+
184
class VideoCGF1Evaluator(BasePredFileEvaluator):
    """Evaluate Video Demo F1 with YT-VIS format prediction and GT files."""

    def __init__(
        self,
        gt_ann_file: str,
        dataset_name: str = "video",
        prob_thresh: float = 0.5,
        iou_types: Optional[Sequence[str]] = None,
    ):
        """
        Args:
            gt_ann_file: path to the YT-VIS format GT annotation JSON
            dataset_name: prefix used in the returned metric names
            prob_thresh: score threshold used by the Demo F1 evaluation
            iou_types: subset of ["bbox", "segm"]; defaults to both
        """
        self.gt_ann_file = gt_ann_file
        self.dataset_name = dataset_name
        self.prob_thresh = prob_thresh
        self.iou_types = list(iou_types) if iou_types is not None else ["bbox", "segm"]
        assert all(iou_type in ["bbox", "segm"] for iou_type in self.iou_types)

    # NOTE: the original annotation said `-> Dict[str, float]`, but this method
    # returns a (results, video_np_level_results) tuple like its siblings.
    def evaluate(self, pred_file: str) -> Tuple[Dict[str, float], Dict]:
        """Run Demo F1 / CG-F1 evaluation on `pred_file`.

        Returns:
            (results, video_np_level_results): aggregate CG-F1/IL_MCC/F1 metrics
            and per-(video, NP) TP/FP/FN/F1 statistics.
        """
        with open(self.gt_ann_file) as f:
            gt = json.load(f)
        with open(pred_file) as f:
            dt = json.load(f)
        # IL_MCC and CG-F1 can only be computed if we have "video_np_pairs" keys in the GT JSON
        compute_ilmcc_and_cgf1 = "video_np_pairs" in gt
        if not compute_ilmcc_and_cgf1:
            print(
                f"Warning: IL_MCC and CG-F1 are not computed for {pred_file=} as it does not have 'video_np_pairs' keys in the GT JSON"
            )
        # For phrase AP and demo F1 evaluation, we need to remap each pair of (video_id, category_id) to
        # a new unique video_id, so that we don't mix detections from different categories under `useCat=False`
        gt, dt = remap_video_category_pairs_to_unique_video_ids(
            gt, dt, add_negative_np_pairs=compute_ilmcc_and_cgf1
        )
        if "segm" in self.iou_types:
            for ann in gt["annotations"]:
                ann["segmentations"] = [
                    _compress_rle(rle) for rle in ann["segmentations"]
                ]
        for d in dt:
            d["image_id"] = d["video_id"]

        results = {}
        use_cats = False  # Demo F1 evaluation does not use categories
        ytvisGT = YTVIS(annotation_file=None, ignore_gt_cats=not use_cats)
        ytvisGT.dataset = gt
        ytvisGT.createIndex()
        ytvisDT = ytvisGT.loadRes(dt)

        video_np_level_results = {}
        for iou_type in self.iou_types:
            demoF1Eval = VideoDemoF1Eval(ytvisGT, ytvisDT, iou_type, self.prob_thresh)

            demoF1Eval.params.useCats = use_cats
            demoF1Eval.params.areaRng = [[0**2, 1e5**2]]
            demoF1Eval.params.areaRngLbl = ["all"]
            demoF1Eval.params.maxDets = [100000]

            demoF1Eval.evaluate()
            demoF1Eval.accumulate()
            demoF1Eval.summarize()
            result_prefix = f"{self.dataset_name}"
            result_prefix += f"_{'mask' if iou_type == 'segm' else 'bbox'}_demo"

            # `stats` is a positional array; `_get_metric_index` maps
            # (metric name, IoU threshold) to the right position.
            stats = demoF1Eval.stats

            if compute_ilmcc_and_cgf1:
                # Average IoU threshold (0.5:0.95)
                cgf1_micro_avg_idx = _get_metric_index("cgF1", None)
                positive_micro_f1_avg_idx = _get_metric_index("positive_micro_F1", None)
                ilmcc_avg_idx = _get_metric_index("IL_MCC", None)
                results[result_prefix + "_cgf1_micro_50_95"] = stats[cgf1_micro_avg_idx]
                results[result_prefix + "_ilmcc_50_95"] = stats[ilmcc_avg_idx]
                results[result_prefix + "_positive_micro_f1_50_95"] = stats[
                    positive_micro_f1_avg_idx
                ]

                # IoU = 0.5
                # (IL_MCC at a fixed IoU is derived from cgF1 / positive_micro_F1)
                cgf1_micro_50_idx = _get_metric_index("cgF1", 0.5)
                positive_micro_f1_50_idx = _get_metric_index("positive_micro_F1", 0.5)
                results[result_prefix + "_cgf1_micro_50"] = stats[cgf1_micro_50_idx]
                results[result_prefix + "_ilmcc_50"] = float(
                    np.array(stats[cgf1_micro_50_idx])
                    / np.array(stats[positive_micro_f1_50_idx])
                )
                results[result_prefix + "_positive_micro_f1_50"] = stats[
                    positive_micro_f1_50_idx
                ]

                # IoU = 0.75
                cgf1_micro_75_idx = _get_metric_index("cgF1", 0.75)
                positive_micro_f1_75_idx = _get_metric_index("positive_micro_F1", 0.75)
                results[result_prefix + "_cgf1_micro_75"] = stats[cgf1_micro_75_idx]
                results[result_prefix + "_ilmcc_75"] = float(
                    np.array(stats[cgf1_micro_75_idx])
                    / np.array(stats[positive_micro_f1_75_idx])
                )
                results[result_prefix + "_positive_micro_f1_75"] = stats[
                    positive_micro_f1_75_idx
                ]

            self.extract_video_np_level_results(demoF1Eval, video_np_level_results)

        return results, video_np_level_results

    def extract_video_np_level_results(self, demoF1Eval, video_np_level_results):
        """Aggregate statistics for video-level metrics.

        Fills `video_np_level_results` (keyed by the original
        (video_id, category_id) pair) with per-IoU TP/FP/FN/F1 values.
        """
        num_iou_thrs = len(demoF1Eval.params.iouThrs)
        iou_50_index = int(np.where(demoF1Eval.params.iouThrs == 0.5)[0])
        iou_75_index = int(np.where(demoF1Eval.params.iouThrs == 0.75)[0])

        result_prefix = "mask" if demoF1Eval.params.iouType == "segm" else "bbox"

        assert len(demoF1Eval.evalImgs) == len(demoF1Eval.cocoGt.dataset["images"])
        for i, video in enumerate(demoF1Eval.cocoGt.dataset["images"]):
            # the original video id and category id before remapping
            video_id = video["orig_video_id"]
            category_id = video["orig_category_id"]
            eval_img_dict = demoF1Eval.evalImgs[i]

            TPs = eval_img_dict.get("TPs", np.zeros(num_iou_thrs, dtype=np.int64))
            FPs = eval_img_dict.get("FPs", np.zeros(num_iou_thrs, dtype=np.int64))
            FNs = eval_img_dict.get("FNs", np.zeros(num_iou_thrs, dtype=np.int64))
            assert len(TPs) == len(FPs) == len(FNs) == num_iou_thrs
            # F1 = 2*TP / (2*TP + FP + FN), and we set F1 to 1.0 if denominator is 0
            denominator = 2 * TPs + FPs + FNs
            F1s = np.where(denominator > 0, 2 * TPs / np.maximum(denominator, 1), 1.0)
            local_results = {
                f"{result_prefix}_TP_50_95": float(TPs.mean()),
                f"{result_prefix}_FP_50_95": float(FPs.mean()),
                f"{result_prefix}_FN_50_95": float(FNs.mean()),
                f"{result_prefix}_F1_50_95": float(F1s.mean()),
                f"{result_prefix}_TP_50": float(TPs[iou_50_index]),
                f"{result_prefix}_FP_50": float(FPs[iou_50_index]),
                f"{result_prefix}_FN_50": float(FNs[iou_50_index]),
                f"{result_prefix}_F1_50": float(F1s[iou_50_index]),
                f"{result_prefix}_TP_75": float(TPs[iou_75_index]),
                f"{result_prefix}_FP_75": float(FPs[iou_75_index]),
                f"{result_prefix}_FN_75": float(FNs[iou_75_index]),
                f"{result_prefix}_F1_75": float(F1s[iou_75_index]),
            }
            if (video_id, category_id) not in video_np_level_results:
                video_np_level_results[(video_id, category_id)] = {}
            video_np_level_results[(video_id, category_id)].update(local_results)
326
+
327
+
328
class VideoTetaEvaluator(BasePredFileEvaluator):
    """Evaluate TETA metric using YouTubeVIS format prediction and GT files.

    Predictions are score-filtered, optionally de-duplicated with a track- or
    frame-level NMS, converted to COCO-vid format, and scored with the TETA
    evaluation toolkit.
    """

    # Order of the scores in the toolkit's positional "TETA" result array.
    # Used to build the keys of the returned results dict (replaces ten
    # hand-written, near-identical dict entries).
    _TETA_FIELDS = (
        "teta",
        "loc_a",
        "assoc_a",
        "cls_a",
        "loc_re",
        "loc_pr",
        "assoc_re",
        "assoc_pr",
        "cls_re",
        "cls_pr",
    )

    def __init__(
        self,
        gt_ann_file: str,
        dataset_name: str = "video",
        tracker_name: str = "Sam3",
        nms_threshold: float = 0.5,
        nms_strategy: str = "none",  # "track", "frame", or "none"
        prob_thresh: float = 0.5,
        is_exhaustive: bool = False,
        use_mask: bool = False,
        num_parallel_cores: int = 8,
    ):
        """
        Args:
            gt_ann_file: path to the YouTubeVIS-format GT annotation JSON
            dataset_name: prefix used in the returned metric names
            tracker_name: tracker sub-folder name expected by the TETA toolkit
            nms_threshold: IoU threshold used by the NMS strategies
            nms_strategy: "track", "frame", or "none"
            prob_thresh: predictions with score below this are dropped
            is_exhaustive: if True, evaluate against COCO-style (exhaustive) GT;
                otherwise against TAO-style GT
            use_mask: if True, evaluate with masks instead of boxes
            num_parallel_cores: number of parallel workers for the toolkit

        Raises:
            ValueError: if `nms_strategy` is not one of "track", "frame", "none"
        """
        self.gt_ann_file = gt_ann_file
        self.dataset_name = dataset_name
        self.tracker_name = tracker_name
        self.nms_threshold = nms_threshold
        self.nms_strategy = nms_strategy.lower()  # Convert to lowercase for consistency
        self.prob_thresh = prob_thresh
        self.metric_prefix = "TETA"
        self.is_exhaustive = is_exhaustive
        self.use_mask = use_mask
        self.num_parallel_cores = num_parallel_cores

        # Verify NMS strategy is valid
        valid_strategies = ["track", "frame", "none"]
        print("current nms_strategy:", self.nms_strategy)
        if self.nms_strategy not in valid_strategies:
            raise ValueError(
                f"Invalid NMS strategy: {self.nms_strategy}. Must be one of {valid_strategies}"
            )

        print(f"Initialized VideoTetaEvaluator with NMS strategy: {self.nms_strategy}")
        print(f"Probability threshold set to: {self.prob_thresh}")
        print(f"Dataset exhaustivity set to: {self.is_exhaustive}")
        print(f"Tracker name set to: {self.tracker_name}")
        print(f"Dataset name set to: {self.dataset_name}")
        print(f"Use mask set to: {self.use_mask}")

    def process_predictions(self, pred_file: str, tmp_dir: str) -> str:
        """Process predictions with selected NMS strategy.

        Filters predictions below `prob_thresh`, applies the configured NMS
        strategy per video, writes the result to a JSON file under `tmp_dir`,
        and returns that file's path.
        """
        with open(pred_file, "r") as f:
            raw_preds = json.load(f)
        print(f"Processing predictions with {self.nms_strategy} NMS strategy")

        # Filter by score threshold
        if self.prob_thresh > 0:
            raw_preds = [d for d in raw_preds if d["score"] >= self.prob_thresh]
            print(
                f"Filtered to {len(raw_preds)} predictions with score >= {self.prob_thresh}"
            )
        # Group predictions by video_id (NMS helpers operate per video, in place)
        video_groups = defaultdict(list)
        for pred in raw_preds:
            video_groups[pred["video_id"]].append(pred)
        # Process based on NMS strategy
        if self.nms_strategy == "track":
            process_track_level_nms(video_groups, nms_threshold=self.nms_threshold)
        elif self.nms_strategy == "frame":
            process_frame_level_nms(video_groups, nms_threshold=self.nms_threshold)
        elif self.nms_strategy == "none":
            print("Skipping NMS processing as strategy is set to 'none'")
            # No processing needed for "none" strategy
        # Save processed predictions
        processed_preds = [
            track for tracks in video_groups.values() for track in tracks
        ]
        processed_path = os.path.join(tmp_dir, "processed_preds.json")
        with open(processed_path, "w") as f:
            json.dump(processed_preds, f)

        print(f"Saved processed predictions to {processed_path}")
        return processed_path

    def evaluate(self, pred_file: str) -> Tuple[Dict[str, float], Dict]:
        """Main evaluation method.

        Returns:
            (results, video_np_level_results): aggregate TETA metrics keyed by
            "{dataset_name}_{bbox|mask}_{field}" and an (empty) per-video-NP dict.
        """

        print(f"Evaluating TETA Metric with {self.nms_strategy.upper()} NMS strategy")
        with tempfile.TemporaryDirectory() as tmp_dir:
            # Process predictions first
            processed_pred_file = self.process_predictions(pred_file, tmp_dir)

            # Convert GT to COCO-vid format
            gt_dir = os.path.join(tmp_dir, "gt")
            os.makedirs(gt_dir, exist_ok=True)
            gt_coco_path = os.path.join(gt_dir, "annotations.json")
            convert_ytbvis_to_cocovid_gt(self.gt_ann_file, gt_coco_path)

            # Convert processed predictions to COCO-vid format
            pred_dir = os.path.join(tmp_dir, "predictions")
            tracker_dir = os.path.join(pred_dir, self.tracker_name)
            os.makedirs(tracker_dir, exist_ok=True)
            pred_coco_path = os.path.join(tracker_dir, "track_results_cocofmt.json")
            convert_ytbvis_to_cocovid_pred(
                youtubevis_pred_path=processed_pred_file,
                converted_dataset_path=gt_coco_path,
                output_path=pred_coco_path,
            )
            # Configure TETA evaluator
            default_eval_config = config.get_default_eval_config()
            default_eval_config["PRINT_ONLY_COMBINED"] = True
            default_eval_config["DISPLAY_LESS_PROGRESS"] = True
            default_eval_config["OUTPUT_TEMP_RAW_DATA"] = True
            default_eval_config["NUM_PARALLEL_CORES"] = self.num_parallel_cores
            default_dataset_config = config.get_default_dataset_config()
            default_dataset_config["TRACKERS_TO_EVAL"] = [self.tracker_name]
            default_dataset_config["GT_FOLDER"] = gt_dir
            default_dataset_config["OUTPUT_FOLDER"] = pred_dir
            default_dataset_config["TRACKER_SUB_FOLDER"] = tracker_dir
            default_dataset_config["USE_MASK"] = self.use_mask

            evaluator = Evaluator(default_eval_config)
            if self.is_exhaustive:
                dataset_list = [COCO(default_dataset_config)]
                dataset_parsing_key = "COCO"
            else:
                dataset_list = [TAO(default_dataset_config)]
                dataset_parsing_key = "TAO"

            # Run evaluation
            eval_results, _ = evaluator.evaluate(
                dataset_list, [metrics.TETA(exhaustive=self.is_exhaustive)]
            )

            # Extract and format results: the toolkit returns the TETA scores
            # as a positional array whose ordering is given by _TETA_FIELDS.
            geom = "mask" if self.use_mask else "bbox"
            teta_scores = eval_results[dataset_parsing_key]["TETA"]
            results = {
                f"{self.dataset_name}_{geom}_{field}": float(teta_scores[i])
                for i, field in enumerate(self._TETA_FIELDS)
            }

            # video-NP level results not supported for `VideoTetaEvaluator` yet
            video_np_level_results = {}
            return results, video_np_level_results
491
+
492
+
493
+ class VideoPhraseHotaEvaluator(BasePredFileEvaluator):
494
+ """Evaluate Video Phrase HOTA with YT-VIS format prediction and GT files."""
495
+
496
    def __init__(
        self,
        gt_ann_file: str,
        dataset_name: str = "video",
        prob_thresh: float = 0.5,
        iou_types: Optional[Sequence[str]] = None,
        compute_video_mot_hota: bool = False,
    ):
        """
        Args:
            gt_ann_file: path to the YT-VIS format GT annotation JSON
            dataset_name: prefix used in the returned metric names
            prob_thresh: predictions with score <= this threshold are dropped
            iou_types: subset of ["bbox", "segm"]; defaults to both
            compute_video_mot_hota: if True, compute video MOT HOTA, aggregating
                predictions/GT from all categories
        """
        self.gt_ann_file = gt_ann_file
        self.dataset_name = dataset_name
        self.prob_thresh = prob_thresh
        # prefix used when reporting phrase-level HOTA metrics
        self.metric_prefix = "phrase"
        # the list of metrics to collect from the HOTA evaluation results
        self.metric_to_collect = [
            "HOTA",
            "DetA",
            "AssA",
            "DetRe",
            "DetPr",
            "AssRe",
            "AssPr",
            "LocA",
            "OWTA",
        ]
        self.iou_types = list(iou_types) if iou_types is not None else ["bbox", "segm"]
        assert all(iou_type in ["bbox", "segm"] for iou_type in self.iou_types)

        # If True, compute video MOT HOTA, aggregating predictions/GT from all categories.
        self.compute_video_mot_hota = compute_video_mot_hota
526
def evaluate(self, pred_file: str) -> Dict[str, float]:
    """Run HOTA evaluation on a prediction file via the TrackEval YT-VIS toolkit.

    Args:
        pred_file: path to a JSON list of track predictions; each entry carries
            "score", "video_id", "category_id" and equal-length per-frame lists
            "bboxes", "segmentations" and "areas".

    Returns:
        A tuple ``(out_dict, video_np_level_results)``: aggregate metrics keyed by
        ``<dataset>_<bbox|mask>_<all|challenging>_<prefix>_<metric>``, plus
        per-(video_id, category_id) results collected by
        ``extract_video_np_level_results``.
        NOTE(review): the ``-> Dict[str, float]`` annotation does not match the
        actual tuple return value — confirm intended signature.
    """
    # use the YT-VIS evaluation toolkit in TrackEval

    with open(self.gt_ann_file) as f:
        gt = json.load(f)
    with open(pred_file) as f:
        dt = json.load(f)
    # keep only predictions with score above the probability threshold
    dt = [d for d in dt if d["score"] > self.prob_thresh]
    for d in dt:
        # per-frame lists must stay aligned across boxes/masks/areas
        assert len(d["areas"]) == len(d["bboxes"])
        assert len(d["areas"]) == len(d["segmentations"])
        # remove empty boxes (otherwise they will count as false positives for during
        # per-frame detection accuracy in HOTA evaluation)
        for t in range(len(d["bboxes"])):
            bbox = d["bboxes"][t]
            if d["areas"][t] == 0 or bbox is None or all(x == 0 for x in bbox):
                d["segmentations"][t] = None
                d["bboxes"][t] = None
                d["areas"][t] = None
        # check that box occurence and mask occurence are consistent
        for bbox, mask, area in zip(d["bboxes"], d["segmentations"], d["areas"]):
            assert (area is None) == (bbox is None)
            assert (area is None) == (mask is None)
        # set all scores to 1.0 for HOTA evaluation (just like Demo F1, the exact score
        # value is not used in HOTA metrics; it will be treated as a detection prediction
        # as long as its score is above the threshold)
        d["score"] = 1.0

    # remap the GT and DT annotations for phrase HOTA evaluation
    gt = _fill_in_ann_height_width(gt)
    if not self.compute_video_mot_hota:
        # remap the GT and DT annotations for phrase HOTA evaluation
        gt, dt = self._remap_gt_dt(gt, dt)
    else:
        # Compute video-level MOT HOTA
        # Apply track-level NMS
        video_groups = defaultdict(list)
        for pred in dt:
            video_groups[pred["video_id"]].append(pred)
        process_track_level_nms(video_groups, nms_threshold=0.5)
        dt = [track for tracks in video_groups.values() for track in tracks]

        # Remap GT track ids for class-agnostic HOTA
        gt, dt = remap_gt_dt_class_agnostic(gt, dt)

    # run the HOTA evaluation using TrackEval on the remapped (video_id, category_id) pairs
    out_dict = {}
    video_np_level_results = {}
    for iou_type in self.iou_types:
        # TrackEval is driven through CLI-style arguments
        output_res, _ = run_ytvis_eval(
            args=[
                "--METRICS",
                "HOTA",
                "--IOU_TYPE",
                iou_type,
                "--DATASET_NAME",
                self.dataset_name,
                "--USE_PARALLEL",
                "True",
                "--NUM_PARALLEL_CORES",
                "8",
                "--PLOT_CURVES",
                "False",
                "--LOG_ON_ERROR",
                "None",
                "--PRINT_ONLY_COMBINED",
                "True",
                "--OUTPUT_SUMMARY",
                "False",
                "--OUTPUT_DETAILED",
                "False",
                "--TIME_PROGRESS",
                "False",
                "--PRINT_CONFIG",
                "False",
            ],
            gt_json=gt,
            dt_json=dt,
        )
        # collect per-sequence (video-NP) metrics from the raw TrackEval output
        self.extract_video_np_level_results(
            iou_type=iou_type,
            remapped_gt=gt,
            raw_results=output_res[self.dataset_name]["tracker"],
            video_np_level_results=video_np_level_results,
        )

        # Fold the combined-sequence HOTA numbers into out_dict; `field` selects
        # the aggregation bucket ("COMBINED_SEQ" or the challenging subset).
        def _summarize_results(output_res, iou_type, field, suffix):
            eval_res = output_res[self.dataset_name]["tracker"][field]
            result_prefix = f"{self.dataset_name}_{'mask' if iou_type == 'segm' else 'bbox'}_{suffix}"
            for metric_name in self.metric_to_collect:
                eval_res_hota = eval_res["cls_comb_cls_av"]["HOTA"]
                result_key = f"{result_prefix}_{self.metric_prefix}_{metric_name}"
                result_value = float(np.mean(eval_res_hota[metric_name]))
                out_dict[result_key] = result_value

        _summarize_results(output_res, iou_type, "COMBINED_SEQ", "all")
        if "COMBINED_SEQ_CHALLENGING" in output_res[self.dataset_name]["tracker"]:
            _summarize_results(
                output_res, iou_type, "COMBINED_SEQ_CHALLENGING", "challenging"
            )

    # video-NP level results not supported for `VideoPhraseHotaEvaluator` yet
    return out_dict, video_np_level_results
630
+
631
def _remap_gt_dt(self, gt, dt):
    """Remap (video_id, category_id) pairs to unique video ids and collapse every
    category onto a single placeholder for phrase-level HOTA.

    Mirrors the image-side phrase metrics, where each image-NP pair gets its own
    remapped image id and a single category emulates ``useCat=False``.
    """
    # Separate each NP of a video into its own new video id so detections from
    # different categories are never mixed during matching.
    gt, dt = remap_video_category_pairs_to_unique_video_ids(gt, dt)

    # Collapse all categories onto one placeholder id (phrase HOTA is computed
    # class-agnostically, similar to "useCat=False" for video phrase AP).
    placeholder_id = 1
    gt["categories"] = [
        {
            "supercategory": "object",
            "id": placeholder_id,
            "name": "_REMAPPED_FOR_PHRASE_METRICS_",
        }
    ]
    for ann in gt["annotations"]:
        ann["category_id"] = placeholder_id
    for pred in dt:
        pred["category_id"] = placeholder_id

    # The TrackEval YT-VIS toolkit keys sequences by file name, so prefix every
    # frame path with the remapped video id to keep filenames unique.
    for video in gt["videos"]:
        prefix = f"remapped_vid_{video['id']:012d}"
        video["file_names"] = [f"{prefix}/{name}" for name in video["file_names"]]
    return gt, dt
658
+
659
def extract_video_np_level_results(
    self, iou_type, remapped_gt, raw_results, video_np_level_results
):
    """Collect per-(video, NP) HOTA statistics into `video_np_level_results`.

    For every remapped video entry, reads the TrackEval HOTA results of its
    sequence and stores the mean of each collected metric under the original
    (video_id, category_id) pair, prefixed with "bbox_" or "mask_".
    """
    prefix = "mask" if iou_type == "segm" else "bbox"
    for video in remapped_gt["videos"]:
        # key results by the pre-remap ids so they map back to video-NP pairs
        pair_key = (video["orig_video_id"], video["orig_category_id"])
        seq_name = f"remapped_vid_{video['id']:012d}"
        hota_res = raw_results[seq_name]["_REMAPPED_FOR_PHRASE_METRICS_"]["HOTA"]

        entry = video_np_level_results.setdefault(pair_key, {})
        for metric_name in self.metric_to_collect:
            entry[f"{prefix}_{metric_name}"] = float(hota_res[metric_name].mean())
678
+
679
+
680
class VideoClassBasedHotaEvaluator(VideoPhraseHotaEvaluator):
    """Class-based (per-category) variant of the video HOTA evaluator.

    Unlike the phrase variant, GT and predictions keep their original video and
    category ids, so the TrackEval toolkit aggregates HOTA per category.
    """

    def __init__(
        self,
        gt_ann_file: str,
        dataset_name: str = "video",
        prob_thresh: float = 0.5,
    ):
        super().__init__(gt_ann_file, dataset_name, prob_thresh)
        # result keys become "..._class_<metric>" instead of "..._phrase_<metric>"
        self.metric_prefix = "class"

    def _remap_gt_dt(self, gt, dt):
        return gt, dt  # no remapping needed for class-based HOTA evaluation

    def extract_video_np_level_results(self, *args, **kwargs):
        pass  # no video-NP level results for class-based HOTA evaluation
695
+
696
+
697
+ def _compress_rle(rle):
698
+ """Convert RLEs from uncompressed (integer list) to compressed (string) format."""
699
+ if rle is None:
700
+ return None
701
+ if isinstance(rle["counts"], list):
702
+ rle = pycocotools.mask.frPyObjects(rle, rle["size"][0], rle["size"][1])
703
+ rle["counts"] = rle["counts"].decode()
704
+ return rle
705
+
706
+
707
def remap_video_category_pairs_to_unique_video_ids(
    gt_json, dt_json, add_negative_np_pairs=False
):
    """
    Assign every (video_id, category_id) pair its own fresh, unique video id.

    Phrase AP and demo F1 evaluation on videos run with ``useCat=False``; giving
    each video-NP pair a dedicated video id keeps detections from different
    categories from being mixed in computeIoU. This mirrors the image-side
    evaluation, where each image-NP pair receives a remapped unique
    coco_image_id (based on its query["id"] in CustomCocoDetectionAPI.load_queries
    in modulated_detection_api.py).

    Args:
        gt_json: COCO-video style GT dict with "videos" and "annotations" (and
            "video_np_pairs" when add_negative_np_pairs is True).
        dt_json: list of track predictions carrying "video_id"/"category_id".
        add_negative_np_pairs: also allocate ids for negative video-NP pairs
            (needed for IL_MCC and CG-F1 evaluation).

    Returns:
        The mutated (gt_json, dt_json) pair.
    """
    videos_by_id = {v["id"]: v for v in gt_json["videos"]}

    # Gather every (video_id, category_id) pair seen in predictions or GT.
    pairs = {(p["video_id"], p["category_id"]) for p in dt_json}
    pairs |= {(a["video_id"], a["category_id"]) for a in gt_json["annotations"]}

    # Deterministically hand out new 1-based video ids in sorted pair order.
    pair_to_new_id = {pair: idx for idx, pair in enumerate(sorted(pairs), start=1)}

    # Negative NP pairs get ids too -- required for IL_MCC and CG-F1 evaluation.
    if add_negative_np_pairs:
        for vnp in gt_json["video_np_pairs"]:
            pair = (vnp["video_id"], vnp["category_id"])
            if pair not in pair_to_new_id:
                pair_to_new_id[pair] = len(pair_to_new_id) + 1

    # Rewrite "video_id" on predictions and on GT annotations.
    for pred in dt_json:
        pred["video_id"] = pair_to_new_id[(pred["video_id"], pred["category_id"])]
    for ann in gt_json["annotations"]:
        ann["video_id"] = pair_to_new_id[(ann["video_id"], ann["category_id"])]

    # Duplicate each video entry once per pair, keeping the original ids so that
    # sample-level eval metrics can later be tied back to the video-NP pair.
    remapped_videos = []
    for (orig_vid, orig_cat), new_id in pair_to_new_id.items():
        entry = videos_by_id[orig_vid].copy()
        entry["id"] = new_id
        entry["orig_video_id"] = orig_vid
        entry["orig_category_id"] = orig_cat
        remapped_videos.append(entry)
    gt_json["videos"] = remapped_videos

    return gt_json, dt_json
768
+
769
+
770
def remap_gt_dt_class_agnostic(gt, dt):
    """
    Prepare GT/DT for class-agnostic HOTA: merge all GT tracks of each video
    (across NPs), give every track a globally unique id, and collapse every
    category onto a single placeholder category (id 1). Also attaches
    orig_video_id / orig_category_id to each video entry for compatibility with
    the phrase-level result extraction.
    """
    # Group GT annotations by video, then give each (video, old track id) a
    # fresh globally-unique id and collapse categories at the same time.
    per_video_anns = defaultdict(list)
    for ann in gt["annotations"]:
        per_video_anns[ann["video_id"]].append(ann)

    fresh_tid = 1
    for anns in per_video_anns.values():
        tid_remap = {}
        for ann in anns:
            old_tid = ann["id"]
            if old_tid not in tid_remap:
                tid_remap[old_tid] = fresh_tid
                fresh_tid += 1
            ann["id"] = tid_remap[old_tid]
            ann["category_id"] = 1  # class-agnostic

    # Single placeholder category for the whole GT.
    gt["categories"] = [
        {
            "supercategory": "object",
            "id": 1,
            "name": "_REMAPPED_FOR_PHRASE_METRICS_",
        }
    ]

    # Attach orig_video_id / orig_category_id to each video for compatibility.
    # NOTE: annotations were already collapsed above, so orig_category_id ends
    # up as the placeholder id (or None for videos without annotations).
    anns_by_video = defaultdict(list)
    for ann in gt["annotations"]:
        anns_by_video[ann["video_id"]].append(ann)
    for video in gt["videos"]:
        video["orig_video_id"] = video["id"]
        vid_anns = anns_by_video[video["id"]]
        video["orig_category_id"] = vid_anns[0]["category_id"] if vid_anns else None
        # unique per-frame file names, as required by the TrackEval toolkit
        video["file_names"] = [
            f"remapped_vid_{video['id']:012d}/{name}" for name in video["file_names"]
        ]

    # Predictions become class-agnostic as well.
    for pred in dt:
        pred["category_id"] = 1
    return gt, dt
825
+
826
+
827
+ def _fill_in_ann_height_width(gt_json):
828
+ """Fill in missing height/width in GT annotations from its video info."""
829
+ video_id_to_video = {v["id"]: v for v in gt_json["videos"]}
830
+ for ann in gt_json["annotations"]:
831
+ if "height" not in ann or "width" not in ann:
832
+ video = video_id_to_video[ann["video_id"]]
833
+ if "height" not in ann:
834
+ ann["height"] = video["height"]
835
+ if "width" not in ann:
836
+ ann["width"] = video["width"]
837
+
838
+ return gt_json
source_code/sam3/sam3/eval/teta_eval_toolkit/_timing.py ADDED
@@ -0,0 +1,69 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# fmt: off
# flake8: noqa

import inspect
from functools import wraps
from time import perf_counter

# Module-level switches and accumulators shared by every wrapped function:
# DO_TIMING enables the instrumentation; DISPLAY_LESS_PROGRESS suppresses the
# per-call prints for bound methods (functions whose first parameter is "self").
DO_TIMING = False
DISPLAY_LESS_PROGRESS = False
# Accumulated wall-clock seconds per function/method name.
timer_dict = {}
# Number of timed top-level (non-method) calls printed so far.
counter = 0


def time(f):
    """Decorator: measure *f* with perf_counter, print and accumulate the result.

    Controlled by the module-level DO_TIMING flag; when it is False (the
    default) the wrapped function runs with no instrumentation. When a call
    named "Evaluator.evaluate" finishes, the accumulated per-function totals
    are printed as a summary.
    """
    @wraps(f)
    def wrap(*args, **kw):
        if DO_TIMING:
            # Run function with timing
            ts = perf_counter()
            result = f(*args, **kw)
            te = perf_counter()
            tt = te - ts

            # Get function name
            arg_names = inspect.getfullargspec(f)[0]
            if arg_names[0] == "self" and DISPLAY_LESS_PROGRESS:
                return result
            elif arg_names[0] == "self":
                method_name = type(args[0]).__name__ + "." + f.__name__
            else:
                method_name = f.__name__

            # Record accumulative time in each function for analysis
            if method_name in timer_dict.keys():
                timer_dict[method_name] += tt
            else:
                timer_dict[method_name] = tt

            # If code is finished, display timing summary
            if method_name == "Evaluator.evaluate":
                print("")
                print("Timing analysis:")
                for key, value in timer_dict.items():
                    print("%-70s %2.4f sec" % (key, value))
            else:
                # Get function argument values for printing special arguments of interest
                # NOTE(review): assumes "tracker"/"seq"/"cls" are passed
                # positionally and are strings — keyword calls would be missed
                # or raise IndexError here; confirm call sites.
                arg_titles = ["tracker", "seq", "cls"]
                arg_vals = []
                for i, a in enumerate(arg_names):
                    if a in arg_titles:
                        arg_vals.append(args[i])
                arg_text = "(" + ", ".join(arg_vals) + ")"

                # Display methods and functions with different indentation.
                if arg_names[0] == "self":
                    print("%-74s %2.4f sec" % (" " * 4 + method_name + arg_text, tt))
                elif arg_names[0] == "test":
                    pass
                else:
                    global counter
                    counter += 1
                    print("%i %-70s %2.4f sec" % (counter, method_name + arg_text, tt))

            return result
        else:
            # If config["TIME_PROGRESS"] is false, or config["USE_PARALLEL"] is true, run functions normally without timing.
            return f(*args, **kw)

    return wrap
@@ -0,0 +1,659 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # fmt: off
2
+ # flake8: noqa
3
+
4
+ """TAO Dataset."""
5
+ import copy
6
+ import itertools
7
+ import json
8
+ import os
9
+ from collections import defaultdict
10
+
11
+ import numpy as np
12
+
13
+ from .. import _timing
14
+ from ..config import get_default_dataset_config, init_config
15
+ from ..utils import TrackEvalException
16
+ from ._base_dataset import _BaseDataset
17
+
18
+
19
+ class TAO(_BaseDataset):
20
+ """Dataset class for TAO tracking"""
21
+
22
def __init__(self, config=None):
    """Initialize dataset, checking that all required files are present."""
    super().__init__()
    # Fill non-given config values with defaults
    self.config = init_config(config, get_default_dataset_config(), self.get_name())
    self.gt_fol = self.config["GT_FOLDER"]
    self.tracker_fol = self.config["TRACKERS_FOLDER"]
    # TAO evaluation combines results over all classes; no super-categories.
    self.should_classes_combine = True
    self.use_super_categories = False
    # When True, similarity is mask IoU instead of box IoU.
    self.use_mask = self.config["USE_MASK"]

    self.tracker_sub_fol = self.config["TRACKER_SUB_FOLDER"]
    self.output_fol = self.config["OUTPUT_FOLDER"]
    if self.output_fol is None:
        self.output_fol = self.tracker_fol
    self.output_sub_fol = self.config["OUTPUT_SUB_FOLDER"]

    # GT_FOLDER may point directly at a json file, or at a folder that must
    # contain exactly one json file.
    if self.gt_fol.endswith(".json"):
        # NOTE(review): file handle from open() is never closed here.
        self.gt_data = json.load(open(self.gt_fol, "r"))
    else:
        gt_dir_files = [
            file for file in os.listdir(self.gt_fol) if file.endswith(".json")
        ]
        if len(gt_dir_files) != 1:
            raise TrackEvalException(
                f"{self.gt_fol} does not contain exactly one json file."
            )

        with open(os.path.join(self.gt_fol, gt_dir_files[0])) as f:
            self.gt_data = json.load(f)

    # merge categories marked with a merged tag in TAO dataset
    self._merge_categories(self.gt_data["annotations"] + self.gt_data["tracks"])

    # get sequences to eval and sequence information
    self.seq_list = [
        vid["name"].replace("/", "-") for vid in self.gt_data["videos"]
    ]
    self.seq_name2seqid = {
        vid["name"].replace("/", "-"): vid["id"] for vid in self.gt_data["videos"]
    }
    # compute mappings from videos to annotation data
    self.video2gt_track, self.video2gt_image = self._compute_vid_mappings(
        self.gt_data["annotations"]
    )
    # compute sequence lengths (number of images per video)
    self.seq_lengths = {vid["id"]: 0 for vid in self.gt_data["videos"]}
    for img in self.gt_data["images"]:
        self.seq_lengths[img["video_id"]] += 1
    self.seq2images2timestep = self._compute_image_to_timestep_mappings()
    # per-sequence class info: positive, negative and
    # not-exhaustively-labeled category ids
    self.seq2cls = {
        vid["id"]: {
            "pos_cat_ids": list(
                {track["category_id"] for track in self.video2gt_track[vid["id"]]}
            ),
            "neg_cat_ids": vid["neg_category_ids"],
            "not_exh_labeled_cat_ids": vid["not_exhaustive_category_ids"],
        }
        for vid in self.gt_data["videos"]
    }

    # Get classes to eval
    considered_vid_ids = [self.seq_name2seqid[vid] for vid in self.seq_list]
    seen_cats = set(
        [
            cat_id
            for vid_id in considered_vid_ids
            for cat_id in self.seq2cls[vid_id]["pos_cat_ids"]
        ]
    )
    # only classes with ground truth are evaluated in TAO
    self.valid_classes = [
        cls["name"] for cls in self.gt_data["categories"] if cls["id"] in seen_cats
    ]
    cls_name2clsid_map = {
        cls["name"]: cls["id"] for cls in self.gt_data["categories"]
    }

    if self.config["CLASSES_TO_EVAL"]:
        # None entries mark requested classes that are absent from the GT;
        # the all() check below then raises.
        self.class_list = [
            cls.lower() if cls.lower() in self.valid_classes else None
            for cls in self.config["CLASSES_TO_EVAL"]
        ]
        if not all(self.class_list):
            valid_cls = ", ".join(self.valid_classes)
            raise TrackEvalException(
                "Attempted to evaluate an invalid class. Only classes "
                f"{valid_cls} are valid (classes present in ground truth"
                " data)."
            )
    else:
        self.class_list = [cls for cls in self.valid_classes]
    self.cls_name2clsid = {
        k: v for k, v in cls_name2clsid_map.items() if k in self.class_list
    }
    self.clsid2cls_name = {
        v: k for k, v in cls_name2clsid_map.items() if k in self.class_list
    }
    # get trackers to eval
    # NOTE(review): leftover debug print — consider removing or using logging.
    print(self.config["TRACKERS_TO_EVAL"] )
    if self.config["TRACKERS_TO_EVAL"] is None:
        self.tracker_list = os.listdir(self.tracker_fol)
    else:
        self.tracker_list = self.config["TRACKERS_TO_EVAL"]

    if self.config["TRACKER_DISPLAY_NAMES"] is None:
        self.tracker_to_disp = dict(zip(self.tracker_list, self.tracker_list))
    elif (self.config["TRACKERS_TO_EVAL"] is not None) and (
        # NOTE(review): key inconsistency — the branch above checks
        # "TRACKER_DISPLAY_NAMES" but this reads "TK_DISPLAY_NAMES"; confirm
        # which key the default dataset config actually defines.
        len(self.config["TK_DISPLAY_NAMES"]) == len(self.tracker_list)
    ):
        self.tracker_to_disp = dict(
            zip(self.tracker_list, self.config["TK_DISPLAY_NAMES"])
        )
    else:
        raise TrackEvalException(
            "List of tracker files and tracker display names do not match."
        )

    self.tracker_data = {tracker: dict() for tracker in self.tracker_list}

    for tracker in self.tracker_list:
        # TRACKER_SUB_FOLDER may itself be a json path; otherwise look for
        # exactly one json inside <tracker_fol>/<tracker>/<sub_fol>.
        if self.tracker_sub_fol.endswith(".json"):
            with open(os.path.join(self.tracker_sub_fol)) as f:
                curr_data = json.load(f)
        else:
            tr_dir = os.path.join(self.tracker_fol, tracker, self.tracker_sub_fol)
            tr_dir_files = [
                file for file in os.listdir(tr_dir) if file.endswith(".json")
            ]
            if len(tr_dir_files) != 1:
                raise TrackEvalException(
                    f"{tr_dir} does not contain exactly one json file."
                )
            with open(os.path.join(tr_dir, tr_dir_files[0])) as f:
                curr_data = json.load(f)

        # limit detections if MAX_DETECTIONS > 0
        if self.config["MAX_DETECTIONS"]:
            curr_data = self._limit_dets_per_image(curr_data)

        # fill missing video ids
        self._fill_video_ids_inplace(curr_data)

        # make track ids unique over whole evaluation set
        self._make_tk_ids_unique(curr_data)

        # merge categories marked with a merged tag in TAO dataset
        self._merge_categories(curr_data)

        # get tracker sequence information
        curr_vids2tracks, curr_vids2images = self._compute_vid_mappings(curr_data)
        self.tracker_data[tracker]["vids_to_tracks"] = curr_vids2tracks
        self.tracker_data[tracker]["vids_to_images"] = curr_vids2images
+
177
def get_display_name(self, tracker):
    """Return the display name configured for *tracker* (raises KeyError if unknown)."""
    display_name = self.tracker_to_disp[tracker]
    return display_name
179
+
180
def _load_raw_file(self, tracker, seq, is_gt):
    """Load a file (gt or tracker) in the TAO format

    If is_gt, this returns a dict which contains the fields:
    [gt_ids, gt_classes]:
        list (for each timestep) of 1D NDArrays (for each det).
    [gt_dets]: list (for each timestep) of lists of detections.

    if not is_gt, this returns a dict which contains the fields:
    [tk_ids, tk_classes, tk_confidences]:
        list (for each timestep) of 1D NDArrays (for each det).
    [tk_dets]: list (for each timestep) of lists of detections.
    """
    seq_id = self.seq_name2seqid[seq]
    # file location
    if is_gt:
        imgs = self.video2gt_image[seq_id]
    else:
        imgs = self.tracker_data[tracker]["vids_to_images"][seq_id]

    # convert data to required format
    num_timesteps = self.seq_lengths[seq_id]
    img_to_timestep = self.seq2images2timestep[seq_id]
    data_keys = ["ids", "classes", "dets"]
    if not is_gt:
        data_keys += ["tk_confidences"]
    # one slot per timestep; slots without data stay None and are filled with
    # empty arrays below
    raw_data = {key: [None] * num_timesteps for key in data_keys}
    for img in imgs:
        # some tracker data contains images without any ground truth info,
        # these are ignored
        if img["id"] not in img_to_timestep:
            continue
        t = img_to_timestep[img["id"]]
        anns = img["annotations"]
        if self.use_mask:
            # When using mask, extract segmentation data
            raw_data["dets"][t] = [ann.get("segmentation") for ann in anns]
        else:
            # When using bbox, extract bbox data
            raw_data["dets"][t] = np.atleast_2d([ann["bbox"] for ann in anns]).astype(
                float
            )
        raw_data["ids"][t] = np.atleast_1d(
            [ann["track_id"] for ann in anns]
        ).astype(int)
        raw_data["classes"][t] = np.atleast_1d(
            [ann["category_id"] for ann in anns]
        ).astype(int)
        if not is_gt:
            raw_data["tk_confidences"][t] = np.atleast_1d(
                [ann["score"] for ann in anns]
            ).astype(float)

    # replace untouched timesteps with empty arrays of the right shape
    for t, d in enumerate(raw_data["dets"]):
        if d is None:
            raw_data["dets"][t] = np.empty((0, 4)).astype(float)
            raw_data["ids"][t] = np.empty(0).astype(int)
            raw_data["classes"][t] = np.empty(0).astype(int)
            if not is_gt:
                raw_data["tk_confidences"][t] = np.empty(0)

    # rename keys to the gt_*/tk_* naming expected downstream
    if is_gt:
        key_map = {"ids": "gt_ids", "classes": "gt_classes", "dets": "gt_dets"}
    else:
        key_map = {"ids": "tk_ids", "classes": "tk_classes", "dets": "tk_dets"}
    for k, v in key_map.items():
        raw_data[v] = raw_data.pop(k)

    raw_data["num_timesteps"] = num_timesteps
    # per-sequence negative / not-exhaustively-labeled category ids
    raw_data["neg_cat_ids"] = self.seq2cls[seq_id]["neg_cat_ids"]
    raw_data["not_exh_labeled_cls"] = self.seq2cls[seq_id][
        "not_exh_labeled_cat_ids"
    ]
    raw_data["seq"] = seq
    return raw_data
255
+
256
def get_preprocessed_seq_data_thr(self, raw_data, cls, assignment=None):
    """Preprocess data for a single sequence for a single class.

    Inputs:
        raw_data: dict containing the data for the sequence already
            read in by get_raw_seq_data().
        cls: class to be evaluated.
    Outputs:
        gt_ids:
            list (for each timestep) of ids of GT tracks
        tk_ids:
            list (for each timestep) of ids of predicted tracks (all for TP
            matching (Det + AssocA))
        tk_overlap_ids:
            list (for each timestep) of ids of predicted tracks that overlap
            with GTs
        tk_neg_ids:
            list (for each timestep) of ids of predicted tracks that with
            the class id on the negative list for the current sequence.
        tk_exh_ids:
            list (for each timestep) of ids of predicted tracks that do not
            overlap with existing GTs but have the class id on the
            exhaustive annotated class list for the current sequence.
        tk_dets:
            list (for each timestep) of lists of detections that
            corresponding to the tk_ids
        tk_classes:
            list (for each timestep) of lists of classes that corresponding
            to the tk_ids
        tk_confidences:
            list (for each timestep) of lists of classes that corresponding
            to the tk_ids
        sim_scores:
            similarity score between gt_ids and tk_ids.
    """
    # NOTE(review): when cls == "all", cls_id stays unbound but is read in the
    # second loop below (data["gt_classes"][t] = cls_id) — confirm the "all"
    # path is never exercised through this method.
    if cls != "all":
        cls_id = self.cls_name2clsid[cls]

    data_keys = [
        "gt_ids",
        "tk_ids",
        "gt_id_map",
        "tk_id_map",
        "gt_dets",
        "gt_classes",
        "gt_class_name",
        "tk_overlap_classes",
        "tk_overlap_ids",
        "tk_neg_ids",
        "tk_exh_ids",
        "tk_class_eval_tk_ids",
        "tk_dets",
        "tk_classes",
        "tk_confidences",
        "sim_scores",
    ]
    data = {key: [None] * raw_data["num_timesteps"] for key in data_keys}
    unique_gt_ids = []
    unique_tk_ids = []
    num_gt_dets = 0
    num_tk_cls_dets = 0
    num_tk_overlap_dets = 0
    # a predicted track "overlaps" a GT when IoU >= this threshold
    overlap_ious_thr = 0.5
    loc_and_asso_tk_ids = []

    # First pass: per timestep, find which predicted tracks overlap GT of this
    # class (optionally excluding tracks already assigned to other classes).
    for t in range(raw_data["num_timesteps"]):
        # only extract relevant dets for this class for preproc and eval
        if cls == "all":
            gt_class_mask = np.ones_like(raw_data["gt_classes"][t]).astype(bool)
        else:
            gt_class_mask = np.atleast_1d(
                raw_data["gt_classes"][t] == cls_id
            ).astype(bool)

        # select GT that is not in the evaluating classes
        if assignment is not None and assignment:
            all_gt_ids = list(assignment[t].keys())
            gt_ids_in = raw_data["gt_ids"][t][gt_class_mask]
            gt_ids_out = set(all_gt_ids) - set(gt_ids_in)
            tk_ids_out = set([assignment[t][key] for key in list(gt_ids_out)])

        # compute overlapped tracks and add their ids to overlap_tk_ids
        # NOTE(review): sim_scores is (re)bound inside this loop but also read
        # by the second loop below; with zero timesteps it would be unbound.
        sim_scores = raw_data["similarity_scores"]
        overlap_ids_masks = (sim_scores[t][gt_class_mask] >= overlap_ious_thr).any(
            axis=0
        )
        overlap_tk_ids_t = raw_data["tk_ids"][t][overlap_ids_masks]
        if assignment is not None and assignment:
            data["tk_overlap_ids"][t] = list(set(overlap_tk_ids_t) - tk_ids_out)
        else:
            data["tk_overlap_ids"][t] = list(set(overlap_tk_ids_t))

        loc_and_asso_tk_ids += data["tk_overlap_ids"][t]

        data["tk_exh_ids"][t] = []
        data["tk_neg_ids"][t] = []

        if cls == "all":
            continue

    # remove tk_ids that has been assigned to GT belongs to other classes.
    loc_and_asso_tk_ids = list(set(loc_and_asso_tk_ids))

    # remove all unwanted unmatched tracker detections
    for t in range(raw_data["num_timesteps"]):
        # add gt to the data
        if cls == "all":
            gt_class_mask = np.ones_like(raw_data["gt_classes"][t]).astype(bool)
        else:
            gt_class_mask = np.atleast_1d(
                raw_data["gt_classes"][t] == cls_id
            ).astype(bool)
        data["gt_classes"][t] = cls_id
        data["gt_class_name"][t] = cls

        gt_ids = raw_data["gt_ids"][t][gt_class_mask]
        if self.use_mask:
            # masks are plain lists, so filter with a comprehension
            gt_dets = [raw_data['gt_dets'][t][ind] for ind in range(len(gt_class_mask)) if gt_class_mask[ind]]
        else:
            gt_dets = raw_data["gt_dets"][t][gt_class_mask]
        data["gt_ids"][t] = gt_ids
        data["gt_dets"][t] = gt_dets

        # filter pred and only keep those that highly overlap with GTs
        tk_mask = np.isin(
            raw_data["tk_ids"][t], np.array(loc_and_asso_tk_ids), assume_unique=True
        )
        tk_overlap_mask = np.isin(
            raw_data["tk_ids"][t],
            np.array(data["tk_overlap_ids"][t]),
            assume_unique=True,
        )

        tk_ids = raw_data["tk_ids"][t][tk_mask]
        if self.use_mask:
            tk_dets = [raw_data['tk_dets'][t][ind] for ind in range(len(tk_mask)) if
                       tk_mask[ind]]
        else:
            tk_dets = raw_data["tk_dets"][t][tk_mask]
        tracker_classes = raw_data["tk_classes"][t][tk_mask]

        # add overlap classes for computing the FP for Cls term
        tracker_overlap_classes = raw_data["tk_classes"][t][tk_overlap_mask]
        tracker_confidences = raw_data["tk_confidences"][t][tk_mask]
        sim_scores_masked = sim_scores[t][gt_class_mask, :][:, tk_mask]

        # add filtered prediction to the data
        data["tk_classes"][t] = tracker_classes
        data["tk_overlap_classes"][t] = tracker_overlap_classes
        data["tk_ids"][t] = tk_ids
        data["tk_dets"][t] = tk_dets
        data["tk_confidences"][t] = tracker_confidences
        data["sim_scores"][t] = sim_scores_masked
        data["tk_class_eval_tk_ids"][t] = set(
            list(data["tk_overlap_ids"][t])
            + list(data["tk_neg_ids"][t])
            + list(data["tk_exh_ids"][t])
        )

        # count total number of detections
        unique_gt_ids += list(np.unique(data["gt_ids"][t]))
        # the unique track ids are for association.
        unique_tk_ids += list(np.unique(data["tk_ids"][t]))

        num_tk_overlap_dets += len(data["tk_overlap_ids"][t])
        num_tk_cls_dets += len(data["tk_class_eval_tk_ids"][t])
        num_gt_dets += len(data["gt_ids"][t])

    # re-label IDs such that there are no empty IDs
    # (dense 0..N-1 ids; *_id_map keeps the new->old correspondence)
    if len(unique_gt_ids) > 0:
        unique_gt_ids = np.unique(unique_gt_ids)
        gt_id_map = np.nan * np.ones((np.max(unique_gt_ids) + 1))
        gt_id_map[unique_gt_ids] = np.arange(len(unique_gt_ids))
        data["gt_id_map"] = {}
        for gt_id in unique_gt_ids:
            new_gt_id = gt_id_map[gt_id].astype(int)
            data["gt_id_map"][new_gt_id] = gt_id

        for t in range(raw_data["num_timesteps"]):
            if len(data["gt_ids"][t]) > 0:
                data["gt_ids"][t] = gt_id_map[data["gt_ids"][t]].astype(int)

    if len(unique_tk_ids) > 0:
        unique_tk_ids = np.unique(unique_tk_ids)
        tk_id_map = np.nan * np.ones((np.max(unique_tk_ids) + 1))
        tk_id_map[unique_tk_ids] = np.arange(len(unique_tk_ids))

        data["tk_id_map"] = {}
        for track_id in unique_tk_ids:
            new_track_id = tk_id_map[track_id].astype(int)
            data["tk_id_map"][new_track_id] = track_id

        for t in range(raw_data["num_timesteps"]):
            if len(data["tk_ids"][t]) > 0:
                data["tk_ids"][t] = tk_id_map[data["tk_ids"][t]].astype(int)
            if len(data["tk_overlap_ids"][t]) > 0:
                data["tk_overlap_ids"][t] = tk_id_map[
                    data["tk_overlap_ids"][t]
                ].astype(int)

    # record overview statistics.
    data["num_tk_cls_dets"] = num_tk_cls_dets
    data["num_tk_overlap_dets"] = num_tk_overlap_dets
    data["num_gt_dets"] = num_gt_dets
    data["num_tk_ids"] = len(unique_tk_ids)
    data["num_gt_ids"] = len(unique_gt_ids)
    data["num_timesteps"] = raw_data["num_timesteps"]
    data["seq"] = raw_data["seq"]

    self._check_unique_ids(data)

    return data
468
+
469
@_timing.time
def get_preprocessed_seq_data(
    self, raw_data, cls, assignment=None, thresholds=(50, 75)
):
    """Preprocess data for a single sequence for a single class.

    Runs get_preprocessed_seq_data_thr once per localization threshold.

    Args:
        raw_data: sequence data produced by get_raw_seq_data().
        cls: class name to evaluate.
        assignment: optional per-threshold GT-to-track assignment mapping,
            keyed by threshold.
        thresholds: iterable of integer IoU thresholds (percent). A single int
            is accepted and wrapped in a list; None falls back to [50].
            The default is a tuple (not a list) so the shared default object
            can never be mutated across calls.

    Returns:
        dict mapping each threshold to its preprocessed per-threshold data.
    """
    if thresholds is None:
        thresholds = [50]
    elif isinstance(thresholds, int):
        thresholds = [thresholds]

    data = {}
    for thr in thresholds:
        # pick the assignment slice for this threshold, if any was provided
        assignment_thr = assignment[thr] if assignment is not None else None
        data[thr] = self.get_preprocessed_seq_data_thr(
            raw_data, cls, assignment_thr
        )

    return data
489
+
490
+ def _calculate_similarities(self, gt_dets_t, tk_dets_t):
491
+ """Compute similarity scores."""
492
+ if self.use_mask:
493
+ similarity_scores = self._calculate_mask_ious(gt_dets_t, tk_dets_t, is_encoded=True, do_ioa=False)
494
+ else:
495
+ similarity_scores = self._calculate_box_ious(gt_dets_t, tk_dets_t)
496
+ return similarity_scores
497
+
498
+ def _merge_categories(self, annotations):
499
+ """Merges categories with a merged tag.
500
+
501
+ Adapted from https://github.com/TAO-Dataset.
502
+ """
503
+ merge_map = {}
504
+ for category in self.gt_data["categories"]:
505
+ if "merged" in category:
506
+ for to_merge in category["merged"]:
507
+ merge_map[to_merge["id"]] = category["id"]
508
+
509
+ for ann in annotations:
510
+ ann["category_id"] = merge_map.get(ann["category_id"], ann["category_id"])
511
+
512
+ def _compute_vid_mappings(self, annotations):
513
+ """Computes mappings from videos to corresponding tracks and images."""
514
+ vids_to_tracks = {}
515
+ vids_to_imgs = {}
516
+ vid_ids = [vid["id"] for vid in self.gt_data["videos"]]
517
+
518
+ # compute an mapping from image IDs to images
519
+ images = {}
520
+ for image in self.gt_data["images"]:
521
+ images[image["id"]] = image
522
+
523
+ for ann in annotations:
524
+ ann["area"] = ann["bbox"][2] * ann["bbox"][3]
525
+
526
+ vid = ann["video_id"]
527
+ if ann["video_id"] not in vids_to_tracks.keys():
528
+ vids_to_tracks[ann["video_id"]] = list()
529
+ if ann["video_id"] not in vids_to_imgs.keys():
530
+ vids_to_imgs[ann["video_id"]] = list()
531
+
532
+ # fill in vids_to_tracks
533
+ tid = ann["track_id"]
534
+ exist_tids = [track["id"] for track in vids_to_tracks[vid]]
535
+ try:
536
+ index1 = exist_tids.index(tid)
537
+ except ValueError:
538
+ index1 = -1
539
+ if tid not in exist_tids:
540
+ curr_track = {
541
+ "id": tid,
542
+ "category_id": ann["category_id"],
543
+ "video_id": vid,
544
+ "annotations": [ann],
545
+ }
546
+ vids_to_tracks[vid].append(curr_track)
547
+ else:
548
+ vids_to_tracks[vid][index1]["annotations"].append(ann)
549
+
550
+ # fill in vids_to_imgs
551
+ img_id = ann["image_id"]
552
+ exist_img_ids = [img["id"] for img in vids_to_imgs[vid]]
553
+ try:
554
+ index2 = exist_img_ids.index(img_id)
555
+ except ValueError:
556
+ index2 = -1
557
+ if index2 == -1:
558
+ curr_img = {"id": img_id, "annotations": [ann]}
559
+ vids_to_imgs[vid].append(curr_img)
560
+ else:
561
+ vids_to_imgs[vid][index2]["annotations"].append(ann)
562
+
563
+ # sort annotations by frame index and compute track area
564
+ for vid, tracks in vids_to_tracks.items():
565
+ for track in tracks:
566
+ track["annotations"] = sorted(
567
+ track["annotations"],
568
+ key=lambda x: images[x["image_id"]]["frame_index"],
569
+ )
570
+ # compute average area
571
+ track["area"] = sum(x["area"] for x in track["annotations"]) / len(
572
+ track["annotations"]
573
+ )
574
+
575
+ # ensure all videos are present
576
+ for vid_id in vid_ids:
577
+ if vid_id not in vids_to_tracks.keys():
578
+ vids_to_tracks[vid_id] = []
579
+ if vid_id not in vids_to_imgs.keys():
580
+ vids_to_imgs[vid_id] = []
581
+
582
+ return vids_to_tracks, vids_to_imgs
583
+
584
+ def _compute_image_to_timestep_mappings(self):
585
+ """Computes a mapping from images to timestep in sequence."""
586
+ images = {}
587
+ for image in self.gt_data["images"]:
588
+ images[image["id"]] = image
589
+
590
+ seq_to_imgs_to_timestep = {vid["id"]: dict() for vid in self.gt_data["videos"]}
591
+ for vid in seq_to_imgs_to_timestep:
592
+ curr_imgs = [img["id"] for img in self.video2gt_image[vid]]
593
+ curr_imgs = sorted(curr_imgs, key=lambda x: images[x]["frame_index"])
594
+ seq_to_imgs_to_timestep[vid] = {
595
+ curr_imgs[i]: i for i in range(len(curr_imgs))
596
+ }
597
+
598
+ return seq_to_imgs_to_timestep
599
+
600
+ def _limit_dets_per_image(self, annotations):
601
+ """Limits the number of detections for each image.
602
+
603
+ Adapted from https://github.com/TAO-Dataset/.
604
+ """
605
+ max_dets = self.config["MAX_DETECTIONS"]
606
+ img_ann = defaultdict(list)
607
+ for ann in annotations:
608
+ img_ann[ann["image_id"]].append(ann)
609
+
610
+ for img_id, _anns in img_ann.items():
611
+ if len(_anns) <= max_dets:
612
+ continue
613
+ _anns = sorted(_anns, key=lambda x: x["score"], reverse=True)
614
+ img_ann[img_id] = _anns[:max_dets]
615
+
616
+ return [ann for anns in img_ann.values() for ann in anns]
617
+
618
+ def _fill_video_ids_inplace(self, annotations):
619
+ """Fills in missing video IDs inplace.
620
+
621
+ Adapted from https://github.com/TAO-Dataset/.
622
+ """
623
+ missing_video_id = [x for x in annotations if "video_id" not in x]
624
+ if missing_video_id:
625
+ image_id_to_video_id = {
626
+ x["id"]: x["video_id"] for x in self.gt_data["images"]
627
+ }
628
+ for x in missing_video_id:
629
+ x["video_id"] = image_id_to_video_id[x["image_id"]]
630
+
631
+ @staticmethod
632
+ def _make_tk_ids_unique(annotations):
633
+ """Makes track IDs unqiue over the whole annotation set.
634
+
635
+ Adapted from https://github.com/TAO-Dataset/.
636
+ """
637
+ track_id_videos = {}
638
+ track_ids_to_update = set()
639
+ max_track_id = 0
640
+ for ann in annotations:
641
+ t = ann["track_id"]
642
+ if t not in track_id_videos:
643
+ track_id_videos[t] = ann["video_id"]
644
+
645
+ if ann["video_id"] != track_id_videos[t]:
646
+ # track id is assigned to multiple videos
647
+ track_ids_to_update.add(t)
648
+ max_track_id = max(max_track_id, t)
649
+
650
+ if track_ids_to_update:
651
+ print("true")
652
+ next_id = itertools.count(max_track_id + 1)
653
+ new_tk_ids = defaultdict(lambda: next(next_id))
654
+ for ann in annotations:
655
+ t = ann["track_id"]
656
+ v = ann["video_id"]
657
+ if t in track_ids_to_update:
658
+ ann["track_id"] = new_tk_ids[t, v]
659
+ return len(track_ids_to_update)
source_code/sam3/sam3/eval/teta_eval_toolkit/eval.py ADDED
@@ -0,0 +1,275 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # fmt: off
2
+ # flake8: noqa
3
+
4
+ import copy
5
+ import os
6
+ import pickle
7
+ import time
8
+ import traceback
9
+ from functools import partial
10
+ from multiprocessing.pool import Pool
11
+
12
+ import numpy as np
13
+
14
+ from . import _timing, utils
15
+ from .config import get_default_eval_config, init_config
16
+ from .utils import TrackEvalException
17
+
18
+
19
class Evaluator:
    """Evaluator class for evaluating different metrics for each dataset.

    Expected flow: construct with a config, then call ``evaluate`` with a
    list of dataset objects and a list of metric objects.
    """

    def __init__(self, config=None):
        """Initialize the evaluator with a config file."""
        self.config = init_config(config, get_default_eval_config(), "Eval")
        # Only run timing analysis if not run in parallel.
        if self.config["TIME_PROGRESS"] and not self.config["USE_PARALLEL"]:
            _timing.DO_TIMING = True
            if self.config["DISPLAY_LESS_PROGRESS"]:
                _timing.DISPLAY_LESS_PROGRESS = True

    @_timing.time
    def evaluate(self, dataset_list, metrics_list):
        """Evaluate a set of metrics on a set of datasets.

        Args:
            dataset_list: dataset objects exposing ``get_name()`` and
                ``get_eval_info()``.
            metrics_list: metric objects to evaluate.

        Returns:
            (output_res, output_msg): per-dataset result and status dicts.
        """
        config = self.config
        metric_names = utils.validate_metrics_list(metrics_list)
        dataset_names = [dataset.get_name() for dataset in dataset_list]
        output_res = {}
        output_msg = {}

        for dataset, dname in zip(dataset_list, dataset_names):
            # Get dataset info about what to evaluate
            output_res[dname] = {}
            output_msg[dname] = {}
            tracker_list, seq_list, class_list = dataset.get_eval_info()
            print(
                f"\nEvaluating {len(tracker_list)} tracker(s) on "
                f"{len(seq_list)} sequence(s) for {len(class_list)} class(es)"
                f" on {dname} dataset using the following "
                f'metrics: {", ".join(metric_names)}\n'
            )

            # Evaluate each tracker
            for tracker in tracker_list:
                try:
                    output_res, output_msg = self.evaluate_tracker(
                        tracker,
                        dataset,
                        dname,
                        class_list,
                        metrics_list,
                        metric_names,
                        seq_list,
                        output_res,
                        output_msg,
                    )
                except Exception as err:
                    output_res[dname][tracker] = None
                    # isinstance (not type equality) so subclasses of
                    # TrackEvalException also get their message reported.
                    if isinstance(err, TrackEvalException):
                        output_msg[dname][tracker] = str(err)
                    else:
                        output_msg[dname][tracker] = "Unknown error occurred."
                    print("Tracker %s was unable to be evaluated." % tracker)
                    print(err)
                    traceback.print_exc()
                    if config["LOG_ON_ERROR"] is not None:
                        with open(config["LOG_ON_ERROR"], "a") as f:
                            print(dname, file=f)
                            print(tracker, file=f)
                            print(traceback.format_exc(), file=f)
                            print("\n\n\n", file=f)
                    if config["BREAK_ON_ERROR"]:
                        raise err
                    elif config["RETURN_ON_ERROR"]:
                        return output_res, output_msg

        return output_res, output_msg

    def evaluate_tracker(
        self,
        tracker,
        dataset,
        dname,
        class_list,
        metrics_list,
        metric_names,
        seq_list,
        output_res,
        output_msg,
    ):
        """Evaluate each sequence in parallel or in series.

        Combines per-sequence results over sequences, classes and (optionally)
        super-classes, prints/saves summaries and records the outcome into
        ``output_res``/``output_msg`` (returned back to the caller).
        """
        print("\nEvaluating %s\n" % tracker)
        time_start = time.time()
        config = self.config
        if config["USE_PARALLEL"]:
            with Pool(config["NUM_PARALLEL_CORES"]) as pool:
                _eval_sequence = partial(
                    eval_sequence,
                    dataset=dataset,
                    tracker=tracker,
                    class_list=class_list,
                    metrics_list=metrics_list,
                    metric_names=metric_names,
                )
                results = pool.map(_eval_sequence, seq_list)
                res = dict(zip(seq_list, results))
        else:
            res = {}
            for curr_seq in sorted(seq_list):
                res[curr_seq] = eval_sequence(
                    curr_seq, dataset, tracker, class_list, metrics_list, metric_names
                )

        # collecting combined cls keys (cls averaged, det averaged, super classes)
        cls_keys = []
        res["COMBINED_SEQ"] = {}
        # combine sequences for each class
        for c_cls in class_list:
            res["COMBINED_SEQ"][c_cls] = {}
            for metric, mname in zip(metrics_list, metric_names):
                curr_res = {
                    seq_key: seq_value[c_cls][mname]
                    for seq_key, seq_value in res.items()
                    if seq_key != "COMBINED_SEQ"
                }
                # combine results over all sequences and then over all classes
                res["COMBINED_SEQ"][c_cls][mname] = metric.combine_sequences(curr_res)

        # combine classes
        if dataset.should_classes_combine:
            if config["OUTPUT_PER_SEQ_RES"]:
                video_keys = res.keys()
            else:
                video_keys = ["COMBINED_SEQ"]
            for v_key in video_keys:
                cls_keys += ["average"]
                res[v_key]["average"] = {}
                for metric, mname in zip(metrics_list, metric_names):
                    cls_res = {
                        cls_key: cls_value[mname]
                        for cls_key, cls_value in res[v_key].items()
                        if cls_key not in cls_keys
                    }
                    res[v_key]["average"][
                        mname
                    ] = metric.combine_classes_class_averaged(
                        cls_res, ignore_empty=True
                    )

        # combine classes to super classes
        if dataset.use_super_categories:
            for cat, sub_cats in dataset.super_categories.items():
                cls_keys.append(cat)
                res["COMBINED_SEQ"][cat] = {}
                for metric, mname in zip(metrics_list, metric_names):
                    cat_res = {
                        cls_key: cls_value[mname]
                        for cls_key, cls_value in res["COMBINED_SEQ"].items()
                        if cls_key in sub_cats
                    }
                    res["COMBINED_SEQ"][cat][
                        mname
                    ] = metric.combine_classes_det_averaged(cat_res)
        # Print and output results in various formats
        if config["TIME_PROGRESS"]:
            print(
                f"\nAll sequences for {tracker} finished in"
                f" {time.time() - time_start} seconds"
            )
        output_fol = dataset.get_output_fol(tracker)
        os.makedirs(output_fol, exist_ok=True)

        # take a mean of each field of each thr
        if config["OUTPUT_PER_SEQ_RES"]:
            all_res = copy.deepcopy(res)
            summary_keys = res.keys()
        else:
            all_res = copy.deepcopy(res["COMBINED_SEQ"])
            summary_keys = ["COMBINED_SEQ"]
        thr_key_list = [50]
        for s_key in summary_keys:
            for metric, mname in zip(metrics_list, metric_names):
                if mname != "TETA":
                    if s_key == "COMBINED_SEQ":
                        metric.print_table(
                            {"COMBINED_SEQ": res["COMBINED_SEQ"][cls_keys[0]][mname]},
                            tracker,
                            cls_keys[0],
                        )
                    continue

                for c_cls in res[s_key].keys():
                    for thr in thr_key_list:
                        all_res[s_key][c_cls][mname][thr] = metric._summary_row(
                            res[s_key][c_cls][mname][thr]
                        )
                    # Average the summarized rows over all thresholds.
                    x = (
                        np.array(list(all_res[s_key][c_cls]["TETA"].values()))
                        .astype("float")
                        .mean(axis=0)
                    )
                    all_res_summary = list(x.round(decimals=2).astype("str"))
                    all_res[s_key][c_cls][mname]["ALL"] = all_res_summary
                if config["OUTPUT_SUMMARY"] and s_key == "COMBINED_SEQ":
                    for t in thr_key_list:
                        metric.print_summary_table(
                            all_res[s_key][cls_keys[0]][mname][t],
                            t,
                            tracker,
                            cls_keys[0],
                        )

        if config["OUTPUT_TEM_RAW_DATA"]:
            out_file = os.path.join(output_fol, "teta_summary_results.pth")
            # Use a context manager so the file handle is always closed.
            with open(out_file, "wb") as f:
                pickle.dump(all_res, f)
            print("Saved the TETA summary results.")

        # output
        # NOTE(review): the line below relies on the loop variables `mname`,
        # `s_key` and `t` leaking out of the loops above; this is only valid
        # when a TETA metric was evaluated — confirm intended behavior.
        output_res[dname][mname] = all_res[s_key][cls_keys[0]][mname][t]
        output_msg[dname][tracker] = "Success"

        return output_res, output_msg
234
+
235
+
236
@_timing.time
def eval_sequence(seq, dataset, tracker, class_list, metrics_list, metric_names):
    """Evaluate one sequence for every class and metric.

    When TETA is among the metrics, a global assignment is computed once on
    the all-class data and shared across classes, and per-class Cls_FP
    counts are accumulated and folded back into the per-class results.
    """
    raw_data = dataset.get_raw_seq_data(tracker, seq)
    seq_res = {}

    has_teta = "TETA" in metric_names
    if has_teta:
        thresholds = [50]
        all_cls_data = dataset.get_preprocessed_seq_data(
            raw_data, "all", thresholds=thresholds
        )
        teta_metric = metrics_list[metric_names.index("TETA")]
        assignment = teta_metric.compute_global_assignment(all_cls_data)

        # Per-threshold, per-class accumulators for Cls_FP (one slot per
        # localization alpha in [0.5, 0.95] with step 0.05).
        num_alphas = len(np.arange(0.5, 0.99, 0.05))
        cls_fp = {
            thr: {cls_name: np.zeros(num_alphas) for cls_name in class_list}
            for thr in thresholds
        }

    for cls_name in class_list:
        seq_res[cls_name] = {}
        data = dataset.get_preprocessed_seq_data(
            raw_data, cls_name, assignment, thresholds
        )

        for metric, mname in zip(metrics_list, metric_names):
            if mname == "TETA":
                seq_res[cls_name][mname], cls_fp, _ = metric.eval_sequence(
                    data, cls_name, dataset.clsid2cls_name, cls_fp
                )
            else:
                seq_res[cls_name][mname] = metric.eval_sequence(data)

    if has_teta:
        for thr in thresholds:
            for cls_name in class_list:
                seq_res[cls_name]["TETA"][thr]["Cls_FP"] += cls_fp[thr][cls_name]

    return seq_res
source_code/sam3/sam3/eval/teta_eval_toolkit/metrics/_base_metric.py ADDED
@@ -0,0 +1,148 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # fmt: off
2
+ # flake8: noqa
3
+
4
+ from abc import ABC, abstractmethod
5
+
6
+ import numpy as np
7
+
8
+ from .. import _timing
9
+ from ..utils import TrackEvalException
10
+
11
+
12
class _BaseMetric(ABC):
    """Abstract base class for evaluation metrics.

    Subclasses declare which result fields they produce via the lists set
    up in ``__init__`` and implement the per-sequence evaluation and the
    cross-sequence / cross-class combination hooks.
    """

    @abstractmethod
    def __init__(self):
        # Whether this metric supports plotting of its results.
        self.plottable = False
        # Scalar integer result fields (e.g. raw counts).
        self.integer_fields = []
        # Scalar float result fields.
        self.float_fields = []
        # Labels for array-valued fields (one label per array element).
        self.array_labels = []
        # Array-valued integer result fields.
        self.integer_array_fields = []
        # Array-valued float result fields.
        self.float_array_fields = []
        # All field names produced by this metric.
        self.fields = []
        # Subset of fields shown in summary tables.
        self.summary_fields = []
        self.registered = False

    #####################################################################
    # Abstract functions for subclasses to implement

    @_timing.time
    @abstractmethod
    def eval_sequence(self, data):
        """Evaluate this metric on one preprocessed sequence."""
        ...

    @abstractmethod
    def combine_sequences(self, all_res):
        """Combine per-sequence results into a single result dict."""
        ...

    @abstractmethod
    def combine_classes_class_averaged(self, all_res, ignore_empty=False):
        """Combine per-class results by averaging over classes."""
        ...

    @abstractmethod
    def combine_classes_det_averaged(self, all_res):
        """Combine per-class results by averaging over detections."""
        ...

    def plot_single_tracker_results(self, all_res, tracker, output_folder, cls):
        """Plot results, only valid for metrics with self.plottable."""
        # Plottable subclasses must override this; non-plottable metrics
        # silently do nothing.
        if self.plottable:
            raise NotImplementedError(
                f"plot_results is not implemented for metric {self.get_name()}"
            )
        else:
            pass

    #####################################################################
    # Helper functions which are useful for all metrics:

    @classmethod
    def get_name(cls):
        """Return the metric's display name (the class name)."""
        return cls.__name__

    @staticmethod
    def _combine_sum(all_res, field):
        """Combine sequence results via sum"""
        return sum([all_res[k][field] for k in all_res.keys()])

    @staticmethod
    def _combine_weighted_av(all_res, field, comb_res, weight_field):
        """Combine sequence results via weighted average."""
        # Guard against a zero total weight with np.maximum(1.0, ...).
        return sum(
            [all_res[k][field] * all_res[k][weight_field] for k in all_res.keys()]
        ) / np.maximum(1.0, comb_res[weight_field])

    def print_table(self, table_res, tracker, cls):
        """Print table of results for all sequences."""
        print("")
        metric_name = self.get_name()
        self._row_print(
            [metric_name + ": " + tracker + "-" + cls] + self.summary_fields
        )
        for seq, results in sorted(table_res.items()):
            if seq == "COMBINED_SEQ":
                continue
            summary_res = self._summary_row(results)
            self._row_print([seq] + summary_res)
        # Print the combined row last.
        summary_res = self._summary_row(table_res["COMBINED_SEQ"])
        self._row_print(["COMBINED"] + summary_res)

    def _summary_row(self, results_):
        """Format one result dict into a list of summary-field strings.

        Float fields are scaled to percentages; array fields are averaged
        over their labels first.
        """
        vals = []
        for h in self.summary_fields:
            if h in self.float_array_fields:
                vals.append("{0:1.5g}".format(100 * np.mean(results_[h])))
            elif h in self.float_fields:
                vals.append("{0:1.5g}".format(100 * float(results_[h])))
            elif h in self.integer_fields:
                vals.append("{0:d}".format(int(results_[h])))
            else:
                raise NotImplementedError(
                    "Summary function not implemented for this field type."
                )
        return vals

    @staticmethod
    def _row_print(*argv):
        """Print results in evenly spaced rows, with more space in first row."""
        if len(argv) == 1:
            argv = argv[0]
        to_print = "%-35s" % argv[0]
        for v in argv[1:]:
            to_print += "%-10s" % str(v)
        print(to_print)

    def summary_results(self, table_res):
        """Return a simple summary of final results for a tracker."""
        return dict(
            zip(self.summary_fields, self._summary_row(table_res["COMBINED_SEQ"]),)
        )

    def detailed_results(self, table_res):
        """Return detailed final results for a tracker."""
        # Get detailed field information
        detailed_fields = self.float_fields + self.integer_fields
        for h in self.float_array_fields + self.integer_array_fields:
            for alpha in [int(100 * x) for x in self.array_labels]:
                detailed_fields.append(h + "___" + str(alpha))
            detailed_fields.append(h + "___AUC")

        # Get detailed results
        detailed_results = {}
        for seq, res in table_res.items():
            detailed_row = self._detailed_row(res)
            if len(detailed_row) != len(detailed_fields):
                raise TrackEvalException(
                    f"Field names and data have different sizes "
                    f"({len(detailed_row)} and {len(detailed_fields)})"
                )
            detailed_results[seq] = dict(zip(detailed_fields, detailed_row))
        return detailed_results

    def _detailed_row(self, res):
        """Flatten one result dict into the row order used by detailed_results.

        Scalar fields first, then each array field's per-label values
        followed by its mean (the "AUC" column).
        """
        detailed_row = []
        for h in self.float_fields + self.integer_fields:
            detailed_row.append(res[h])
        for h in self.float_array_fields + self.integer_array_fields:
            for i, _ in enumerate([int(100 * x) for x in self.array_labels]):
                detailed_row.append(res[h][i])
            detailed_row.append(np.mean(res[h]))
        return detailed_row
source_code/sam3/sam3/eval/ytvis_eval.py ADDED
@@ -0,0 +1,411 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
2
+ import copy
3
+ import gc
4
+ import logging
5
+ import os
6
+ from collections import defaultdict
7
+ from operator import xor
8
+ from pathlib import Path
9
+ from typing import List, Optional
10
+
11
+ import numpy as np
12
+ import pycocotools.mask as mask_util
13
+ import torch
14
+ from pycocotools.cocoeval import COCOeval
15
+ from sam3.eval.cgf1_eval import CGF1Eval
16
+ from sam3.eval.coco_eval_offline import convert_to_xywh
17
+ from sam3.model.box_ops import box_xywh_inter_union
18
+ from sam3.train.masks_ops import rle_encode
19
+ from sam3.train.utils import distributed as dist
20
+ from typing_extensions import override
21
+
22
+ try:
23
+ import rapidjson as json
24
+ except ModuleNotFoundError:
25
+ import json
26
+
27
+ from iopath.common.file_io import g_pathmgr
28
+
29
+
30
class YTVISevalMixin:
    """
    Identical to COCOeval but adapts computeIoU to compute IoU between tracklets/masklets.
    """

    @override
    def _prepare(self):
        """
        Copied from cocoeval.py but doesn't convert masks to RLEs (we assume they already are RLEs)
        """
        p = self.params
        if p.useCats:
            gts = self.cocoGt.loadAnns(
                self.cocoGt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds)
            )
            dts = self.cocoDt.loadAnns(
                self.cocoDt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds)
            )
        else:
            gts = self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds))
            dts = self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds))

        # set ignore flag
        for gt in gts:
            gt["ignore"] = gt["ignore"] if "ignore" in gt else 0
            # NOTE(review): the next line overwrites the value just set above
            # (same as upstream cocoeval.py) — crowd GTs are always ignored.
            gt["ignore"] = "iscrowd" in gt and gt["iscrowd"]
            if p.iouType == "keypoints":
                gt["ignore"] = (gt["num_keypoints"] == 0) or gt["ignore"]
        self._gts = defaultdict(list)  # gt for evaluation
        self._dts = defaultdict(list)  # dt for evaluation
        for gt in gts:
            self._gts[gt["image_id"], gt["category_id"]].append(gt)
        for dt in dts:
            self._dts[dt["image_id"], dt["category_id"]].append(dt)
        self.evalImgs = defaultdict(list)  # per-image per-category evaluation results
        self.eval = {}  # accumulated evaluation results

    def computeIoU(self, imgId, catId):
        """
        Compute IoU between tracklets. Copied from cocoeval.py but adapted for videos (in YT-VIS format)

        Here `imgId` indexes a video; each annotation carries per-frame
        `bboxes` / `segmentations` lists, and IoU is aggregated over frames.
        Returns a (num_dets x num_gts) array of IoUs (empty list if either
        side has no annotations).
        """
        p = self.params
        if p.useCats:
            gt = self._gts[imgId, catId]
            dt = self._dts[imgId, catId]
        else:
            gt = [_ for cId in p.catIds for _ in self._gts[imgId, cId]]
            dt = [_ for cId in p.catIds for _ in self._dts[imgId, cId]]
        if len(gt) == 0 or len(dt) == 0:
            return []

        # For class mAP and phrase AP evaluation, we sort the detections in descending order of scores (as in COCOeval).
        # For demo F1 evaluation, we DO NOT sort the detections (but match them with GTs via Hungarian matching).
        assert hasattr(self, "sort_inds_by_scores_in_iou"), (
            "subclasses that inherits YTVISevalMixin should set `self.sort_inds_by_scores_in_iou` "
            "(True for class mAP and phrase AP, False for demo F1)"
        )
        if self.sort_inds_by_scores_in_iou:
            inds = np.argsort([-d["score"] for d in dt], kind="mergesort")
            dt = [dt[i] for i in inds]
            if len(dt) > p.maxDets[-1]:
                dt = dt[0 : p.maxDets[-1]]

        if p.iouType == "segm":
            g = [g["segmentations"] for g in gt]
            d = [d["segmentations"] for d in dt]
        elif p.iouType == "bbox":
            g = [g["bboxes"] for g in gt]
            d = [d["bboxes"] for d in dt]
        else:
            raise Exception("unknown iouType for iou computation")

        def iou_tracklets(preds, gts):
            # Box-tracklet IoU: per-frame intersections/unions are summed
            # over time before dividing.
            preds = torch.tensor(preds)
            gts = torch.tensor(gts)
            inter, union = box_xywh_inter_union(
                preds.unsqueeze(1), gts.unsqueeze(0)
            )  # Num preds x Num GTS x Num frames
            inter = inter.sum(-1)
            union = union.sum(-1)
            assert (
                union > 0
            ).all(), (
                "There exists a tracklet with zero GTs across time. This is suspicious"
            )
            return inter / union

        def iou_masklets(preds, gts):
            # Mask-masklet IoU over per-frame RLEs; frames where only one
            # side has a mask contribute to the union only.
            inter = 0
            union = 0
            for p_i, gt_i in zip(preds, gts):
                if p_i and gt_i:
                    # Compute areas of intersection and union
                    inter += mask_util.area(
                        mask_util.merge([p_i, gt_i], intersect=True)
                    )
                    union += mask_util.area(
                        mask_util.merge([p_i, gt_i], intersect=False)
                    )
                elif gt_i:
                    union += mask_util.area(gt_i)
                elif p_i:
                    union += mask_util.area(p_i)
            if union > 0:
                iou = inter / union
                assert iou >= 0 and iou <= 1, "Encountered an error in IoU computation"
            else:
                # Both masklets empty on every frame: define IoU as 1.
                assert np.isclose(inter, 0) and np.isclose(
                    union, 0
                ), "Encountered an error in IoU computation"
                iou = 1
            return iou

        if p.iouType == "segm":
            ious = [[iou_masklets(d_i, g_i) for g_i in g] for d_i in d]
        else:
            ious = iou_tracklets(d, g)
        return np.array(ious)
148
+
149
+
150
class YTVISeval(YTVISevalMixin, COCOeval):
    """COCOeval specialized for YT-VIS tracklet/masklet IoU (class mAP / phrase AP)."""

    # For class mAP and phrase AP evaluation, we sort the detections in descending order of scores (as in COCOeval).
    sort_inds_by_scores_in_iou = True
153
+
154
+
155
class VideoDemoF1Eval(YTVISevalMixin, CGF1Eval):
    """CGF1Eval specialized for YT-VIS tracklet/masklet IoU (demo F1)."""

    # For demo F1 evaluation, we DO NOT sort the detections (but match them with GTs via Hungarian matching).
    sort_inds_by_scores_in_iou = False
158
+
159
+
160
+ class YTVISResultsWriter:
161
+ """
162
+ Gather and dumps predictions in YT-VIS format.
163
+ Expected flow of API calls: reset() -> N * update() -> compute_synced()
164
+ """
165
+
166
    def __init__(
        self,
        dump_file: str,
        postprocessor,
        gather_pred_via_filesys=False,
        pred_file_evaluators: Optional[List] = None,
        save_per_frame_scores: bool = False,
        write_eval_metrics_file: bool = True,
        eval_metrics_file_suffix: str = ".sam3_eval_metrics",
    ):
        """Initialize the writer.

        Args:
            dump_file: path of the YT-VIS-format JSON prediction file to write.
            postprocessor: object whose ``process_results`` turns raw model
                outputs into per-video prediction dicts.
            gather_pred_via_filesys: gather predictions to rank 0 via the
                file system instead of collective all_gather.
            pred_file_evaluators: optional evaluation hooks applied to the
                dumped prediction file.
            save_per_frame_scores: also store per-frame scores per track.
            write_eval_metrics_file: additionally write evaluation metrics to
                a sibling file (keeps the prediction file in YT-VIS format).
            eval_metrics_file_suffix: suffix appended to ``dump_file`` to form
                the eval-metrics file path.
        """
        self.dump_file = dump_file
        # Buffer of YT-VIS result dicts accumulated across update() calls.
        self.dump = []
        self.postprocessor = postprocessor
        self.gather_pred_via_filesys = gather_pred_via_filesys
        # Only the main process creates the output directory.
        if dist.is_main_process():
            dirname = os.path.dirname(self.dump_file)
            if not os.path.exists(dirname):
                os.makedirs(dirname, exist_ok=True)
                logging.info(f"Creating folder: {dirname}")

        # the evaluation hooks to be applied to the prediction files
        self.pred_file_evaluators = pred_file_evaluators or []
        self.save_per_frame_scores = save_per_frame_scores
        # in addition to the prediction file, we also write the evaluation metrics
        # for easier debugging and analysis (stored in another eval_metrics_file
        # so that we can keep the dumped prediction file under YT-VIS format)
        self.write_eval_metrics_file = write_eval_metrics_file
        if self.write_eval_metrics_file:
            self.eval_metrics_file = self.dump_file + eval_metrics_file_suffix
            os.makedirs(os.path.dirname(self.eval_metrics_file), exist_ok=True)
196
+
197
+ def _dump_vid_preds(self, results):
198
+ dumped_results = copy.deepcopy(results)
199
+ self.dump.extend(dumped_results)
200
+
201
    def prepare(self, predictions):
        """Convert per-video predictions into YT-VIS-format result dicts.

        Args:
            predictions: dict mapping video_id to a prediction dict with
                ``boxes``, ``scores``, ``labels`` and exactly one of
                ``masks`` (dense tensor of shape (N, T, 1, H, W) before
                squeeze — TODO confirm against caller) or ``masks_rle``
                (nested per-object, per-frame RLE dicts).

        Returns:
            list of per-track dicts with keys ``video_id``, ``category_id``,
            ``bboxes``, ``score``, ``segmentations``, ``areas`` (plus
            ``per_frame_scores`` when enabled).
        """
        ytvis_results = []
        for video_id, prediction in predictions.items():
            if len(prediction) == 0:
                continue
            # Validate the expected keys before converting.
            for k in ["boxes", "scores", "labels"]:
                assert (
                    k in prediction
                ), f"Expected predictions to have `{k}` key, available keys are {prediction.keys()}"
            if self.save_per_frame_scores:
                assert (
                    "per_frame_scores" in prediction
                ), f"Expected predictions to have `per_frame_scores` key, available keys are {prediction.keys()}"
            # Exactly one mask representation must be present.
            assert xor(
                "masks" in prediction, "masks_rle" in prediction
            ), f"Expected predictions to have either `masks` key or `masks_rle` key, available keys are {prediction.keys()}"

            boxes = prediction["boxes"]
            boxes = convert_to_xywh(boxes).tolist()
            scores = prediction["scores"].tolist()
            labels = prediction["labels"].tolist()
            if "masks" in prediction:
                masks = prediction["masks"].squeeze(2)
                assert (
                    masks.ndim == 4
                ), "Expected masks to be of shape(N_preds,T_frames,H,W)"

                # Per-frame pixel counts and RLE encodings per masklet.
                areas = [mask.flatten(1).sum(1).tolist() for mask in masks]
                rles = [rle_encode(masklet) for masklet in masks]

                # memory clean
                del masks
                del prediction["masks"]
            elif "masks_rle" in prediction:
                # Areas were precomputed and stored inside each RLE dict;
                # pop them out (missing frames contribute area 0).
                rles = prediction.pop("masks_rle")
                areas = [
                    [0 if rle is None else rle.pop("area") for rle in rles_per_obj]
                    for rles_per_obj in rles
                ]
            else:
                raise ValueError(
                    "Expected either `masks` or `masks_rle` key in the predictions."
                )

            new_results = [
                {
                    "video_id": video_id,
                    "category_id": track_label,
                    "bboxes": track_boxes,
                    "score": track_score,
                    "segmentations": track_masks,
                    "areas": track_areas,
                }
                for (
                    track_boxes,
                    track_masks,
                    track_areas,
                    track_score,
                    track_label,
                ) in zip(boxes, rles, areas, scores, labels)
            ]
            # Optionally, save per-frame scores
            if self.save_per_frame_scores:
                per_frame_scores = prediction["per_frame_scores"].tolist()
                for res, track_per_frame_scores in zip(new_results, per_frame_scores):
                    res["per_frame_scores"] = track_per_frame_scores

            ytvis_results.extend(new_results)

        return ytvis_results
271
+
272
+ def set_sync_device(self, device: torch.device):
273
+ self._sync_device = device
274
+
275
+ def update(self, *args, **kwargs):
276
+ predictions = self.postprocessor.process_results(*args, **kwargs)
277
+ results = self.prepare(predictions)
278
+ self._dump_vid_preds(results)
279
+
280
    def _dump_preds(self):
        """Write the buffered predictions to ``self.dump_file`` as JSON.

        Only the main process writes; all processes clear their buffer.

        Returns:
            The dump file path (str) on the main process, ``None`` elsewhere.
        """
        if not dist.is_main_process():
            # Non-main ranks only release their buffered predictions.
            self.dump = []
            gc.collect()
            return
        dumped_file = Path(self.dump_file)
        logging.info(f"YTVIS evaluator: Dumping predictions to {dumped_file}")
        with g_pathmgr.open(str(dumped_file), "w") as f:
            json.dump(self.dump, f)
        # Free the (potentially large) prediction buffer after writing.
        self.dump = []
        gc.collect()
        return str(dumped_file)
292
+
293
    def synchronize_between_processes(self):
        """Gather predictions from all processes and deduplicate them.

        Predictions are keyed for dedup before gathering (see
        ``_dedup_pre_gather``), gathered either via the file system or a
        collective all_gather, then merged back into ``self.dump``.
        """
        logging.info("YT-VIS evaluator: Synchronizing between processes")
        dump_dict = self._dedup_pre_gather(self.dump)
        if self.gather_pred_via_filesys:
            dump_dict_all_gpus = dist.gather_to_rank_0_via_filesys(dump_dict)
        else:
            dump_dict_all_gpus = dist.all_gather(dump_dict, force_cpu=True)
        self.dump = self._dedup_post_gather(dump_dict_all_gpus)
        logging.info(f"Gathered all {len(self.dump)} predictions")
302
+
303
+ def _dedup_pre_gather(self, predictions):
304
+ """
305
+ Organize the predictions as a dict-of-list using (video_id, category_id) as keys
306
+ for deduplication after gathering them across GPUs.
307
+
308
+ During evaluation, PyTorch data loader under `drop_last: False` would wrap
309
+ around the dataset length to be a multiple of world size (GPU num) and duplicate
310
+ the remaining batches. This causes the same test sample to appear simultaneously
311
+ in multiple GPUs, resulting in duplicated predictions being saved into prediction
312
+ files. These duplicates are then counted as false positives under detection mAP
313
+ metrics (since a ground truth can be matched with only one prediction).
314
+
315
+ For example, if there are 4 GPUs and 6 samples [A1, A2, B1, B2, C1, C2], the data
316
+ loader (under `drop_last: False`) would load it by wrapping it around like
317
+ `[A1, A2, B1, B2, C1, C2, *A1*, *A2*]` to make a multiple of 4 and then split it as
318
+
319
+ - GPU 0: A1, C1
320
+ - GPU 1: A2, C2
321
+ - GPU 3: B1, **A1**
322
+ - GPU 4: B2, **A2**
323
+ (as in DistributedSampler in https://github.com/pytorch/pytorch/blob/521588519da9f4876d90ddd7a17c10d0eca89dc6/torch/utils/data/distributed.py#L116-L124)
324
+
325
+ so the predictions on A1 and A2 will occur twice in the final gathered outputs
326
+ in the prediction file (and counted as false positives). This also affects our
327
+ YT-VIS official val evaluation, but to a lesser extent than YT-VIS dev since
328
+ the latter is much smaller and more susceptible to false positives.
329
+
330
+ So we to deduplicate this. The tricky part is that we cannot deduplicate them
331
+ simply using video id, given that we are sharding the classes in each video
332
+ across multiple batches (with 20 prompts per batch) in our "orig_cats" eval dbs.
333
+
334
+ The solution is to deduplicate based on (video_id, category_id) tuple as keys.
335
+ We organize the predictions as a dict-of-list using (video_id, category_id) as
336
+ keys on each GPU, with the list of masklets under this (video_id, category_id)
337
+ on this GPU as values. Then, we all-gather this dict-of-list across GPUs and
338
+ if a key (video_id, category_id) appears in multiple GPUs, we only take the
339
+ prediction masklet list from one GPU.
340
+ """
341
+ prediction_dict = defaultdict(list)
342
+ for p in predictions:
343
+ prediction_dict[(p["video_id"], p["category_id"])].append(p)
344
+ return prediction_dict
345
+
346
+ def _dedup_post_gather(self, list_of_prediction_dict):
347
+ """
348
+ Deduplicate the predictions from all GPUs. See `_dedup_pre_gather` for details.
349
+ """
350
+ dedup_prediction_dict = {}
351
+ duplication_keys = []
352
+ for prediction_dict in list_of_prediction_dict:
353
+ for k, v in prediction_dict.items():
354
+ if k not in dedup_prediction_dict:
355
+ dedup_prediction_dict[k] = v
356
+ else:
357
+ duplication_keys.append(k)
358
+
359
+ logging.info(
360
+ f"skipped {len(duplication_keys)} duplicated predictions in YTVISResultsWriter "
361
+ f"with the following (video_id, category_id) tuples: {duplication_keys}"
362
+ )
363
+ dedup_predictions = sum(dedup_prediction_dict.values(), [])
364
+ return dedup_predictions
365
+
366
    def compute_synced(
        self,
    ):
        """Gather predictions across ranks, dump them, and run the evaluators.

        Returns a dict of dataset-level metrics on the main process and a
        placeholder ``{"": 0.0}`` on every other rank.
        """
        self.synchronize_between_processes()
        dumped_file = self._dump_preds()
        if not dist.is_main_process():
            return {"": 0.0}

        # run evaluation hooks on the prediction file
        meters = {}
        all_video_np_level_results = defaultdict(dict)
        for evaluator in self.pred_file_evaluators:
            gc.collect()
            results, video_np_level_results = evaluator.evaluate(dumped_file)
            meters.update(results)
            # Merge per-(video, category) metrics contributed by each evaluator.
            for (video_id, category_id), res in video_np_level_results.items():
                all_video_np_level_results[(video_id, category_id)].update(res)

        gc.collect()
        if self.write_eval_metrics_file:
            # convert the nested dict of {(video_id, category_id): per_sample_metric_dict}
            # to a list of per-sample metric dicts (with video_id and category_id) for JSON,
            # as JSON doesn't allow using tuples like (video_id, category_id) as dict keys
            video_np_level_metrics = [
                {"video_id": video_id, "category_id": category_id, **res}
                for (video_id, category_id), res in all_video_np_level_results.items()
            ]
            eval_metrics = {
                "dataset_level_metrics": meters,
                "video_np_level_metrics": video_np_level_metrics,
            }
            with g_pathmgr.open(self.eval_metrics_file, "w") as f:
                json.dump(eval_metrics, f)
            logging.info(
                f"YTVIS evaluator: Dumped evaluation metrics to {self.eval_metrics_file}"
            )

        # Guarantee a non-empty return value even when no evaluator produced metrics.
        if len(meters) == 0:
            meters = {"": 0.0}
        return meters
406
+
407
    def compute(self):
        """No-op per-step metric; the real metrics come from `compute_synced`."""
        return {"": 0.0}
409
+
410
    def reset(self, *args, **kwargs):
        """Clear the buffered predictions (arguments are accepted but ignored)."""
        self.dump = []
source_code/sam3/sam3/model/act_ckpt_utils.py ADDED
@@ -0,0 +1,114 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
2
+
3
+ import inspect
4
+ from functools import wraps
5
+ from typing import Callable, TypeVar, Union
6
+
7
+ import torch
8
+ import torch.nn as nn
9
+ import torch.utils.checkpoint as checkpoint
10
+ from torch.utils._pytree import tree_map_only
11
+
12
+ # Type variables for better type hinting
13
+ T = TypeVar("T")
14
+ Module = TypeVar("Module", bound=nn.Module)
15
+
16
+
17
def activation_ckpt_wrapper(module: Union[nn.Module, Callable]) -> Callable:
    """
    Wrap ``module`` so activation (gradient) checkpointing can be toggled per call.

    Activation checkpointing trades compute for memory by recomputing
    intermediate activations in the backward pass instead of storing them
    during the forward pass.

    When checkpointing is enabled the wrapper accepts keyword arguments only;
    it maps them onto positional arguments using the module's signature
    (filling in declared defaults for omitted parameters).

    Args:
        module: The module or function to wrap with activation checkpointing.

    Returns:
        A wrapped callable. Call it with the module's usual arguments plus the
        extra keyword-only ``act_ckpt_enable`` flag and an optional
        ``use_reentrant`` flag.

    Example:
        ```python
        wrapped_module = activation_ckpt_wrapper(my_module)
        output = wrapped_module(x=input_tensor, y=another_tensor, act_ckpt_enable=True)
        ```
    """

    @wraps(module)
    def act_ckpt_wrapper(
        *args, act_ckpt_enable: bool = True, use_reentrant: bool = False, **kwargs
    ):
        if not act_ckpt_enable:
            return module(*args, **kwargs)

        if args:
            raise ValueError(
                "This wrapper expects keyword arguments only when `act_ckpt_enable=True`"
            )
        # Inspect the underlying callable's signature to rebuild positional args.
        target = module.forward if isinstance(module, nn.Module) else module
        parameters = inspect.signature(target).parameters
        positional = []
        for name, param in parameters.items():
            if name in kwargs:
                positional.append(kwargs.pop(name))
            elif param.default is not inspect.Parameter.empty:
                # Fall back to the declared default when the caller omitted the
                # argument. Useful for primitive types or args that default to None.
                positional.append(param.default)
            elif param.kind is not inspect.Parameter.VAR_KEYWORD:
                # Skip a **kwargs parameter; anything else is a hard error.
                raise ValueError(f"Missing positional argument: {name}")

        # Any tensor left over in kwargs is replaced with a placeholder string,
        # assuming it's not required by the module. If it is required, the
        # module's signature should accept it as a named parameter instead.
        for key in list(kwargs):
            if isinstance(kwargs[key], torch.Tensor):
                kwargs[key] = "_REMOVED_BY_ACT_CKPT_WRAPPER_"

        return checkpoint.checkpoint(
            module, *positional, use_reentrant=use_reentrant, **kwargs
        )

    return act_ckpt_wrapper
91
+
92
+
93
def clone_output_wrapper(f: Callable[..., T]) -> Callable[..., T]:
    """
    Wrap ``f`` so that every CUDA tensor in its output is cloned.

    Cloning breaks aliasing between the returned tensors and the function's
    internal buffers, which avoids in-place-operation errors when the wrapped
    function is used with torch.compile. CPU tensors and non-tensor outputs
    pass through unchanged.

    Args:
        f: The function whose CUDA tensor outputs should be cloned.

    Returns:
        A wrapped function with identical behavior apart from the cloning.
    """

    @wraps(f)
    def wrapped(*args, **kwargs):
        result = f(*args, **kwargs)
        # tree_map_only walks arbitrarily nested containers in the output.
        return tree_map_only(
            torch.Tensor, lambda t: t.clone() if t.is_cuda else t, result
        )

    return wrapped
source_code/sam3/sam3/model/encoder.py ADDED
@@ -0,0 +1,594 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
2
+ # Based on https://github.com/IDEA-Research/GroundingDINO
3
+
4
+ from typing import Any, Dict, List, Optional, Tuple
5
+
6
+ import torch
7
+ from torch import nn, Tensor
8
+
9
+ from .act_ckpt_utils import activation_ckpt_wrapper
10
+ from .model_misc import get_activation_fn, get_clones, get_valid_ratio
11
+
12
+
13
class TransformerEncoderLayer(nn.Module):
    """
    Transformer encoder layer that performs self-attention followed by cross-attention.

    This layer was previously called TransformerDecoderLayer but was renamed to better
    reflect its role in the architecture. It processes input sequences through self-attention
    and then cross-attention with another input (typically image features).

    The layer supports both pre-norm and post-norm configurations, as well as
    positional encoding at different stages of the attention mechanism.
    """

    def __init__(
        self,
        activation: str,
        cross_attention: nn.Module,
        d_model: int,
        dim_feedforward: int,
        dropout: float,
        pos_enc_at_attn: bool,
        pos_enc_at_cross_attn_keys: bool,
        pos_enc_at_cross_attn_queries: bool,
        pre_norm: bool,
        self_attention: nn.Module,
    ):
        """
        Initialize a transformer encoder layer.

        Args:
            activation: Activation function to use in the feedforward network
            cross_attention: Cross-attention module for attending to image features
            d_model: Model dimension/hidden size
            dim_feedforward: Dimension of the feedforward network
            dropout: Dropout probability
            pos_enc_at_attn: Whether to add positional encodings at self-attention
            pos_enc_at_cross_attn_keys: Whether to add positional encodings to keys in cross-attention
            pos_enc_at_cross_attn_queries: Whether to add positional encodings to queries in cross-attention
            pre_norm: Whether to use pre-norm (True) or post-norm (False) architecture
            self_attention: Self-attention module
        """
        super().__init__()
        self.d_model = d_model
        self.dim_feedforward = dim_feedforward
        self.dropout_value = dropout
        self.self_attn = self_attention
        self.cross_attn_image = cross_attention

        # Implementation of Feedforward model
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)

        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.norm3 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.dropout3 = nn.Dropout(dropout)

        self.activation_str = activation
        self.activation = get_activation_fn(activation)
        self.pre_norm = pre_norm

        self.pos_enc_at_attn = pos_enc_at_attn
        self.pos_enc_at_cross_attn_queries = pos_enc_at_cross_attn_queries
        self.pos_enc_at_cross_attn_keys = pos_enc_at_cross_attn_keys

        # Assigned externally (e.g. by TransformerEncoder.__init__) so a layer
        # can condition its behavior on its position in the stack.
        self.layer_idx = None

    def forward_post(
        self,
        tgt: Tensor,
        memory: Tensor,
        tgt_mask: Optional[Tensor] = None,
        memory_mask: Optional[Tensor] = None,
        tgt_key_padding_mask: Optional[Tensor] = None,
        memory_key_padding_mask: Optional[Tensor] = None,
        pos: Optional[Tensor] = None,
        query_pos: Optional[Tensor] = None,
        **kwargs,
    ) -> Tensor:
        """
        Forward pass for post-norm architecture.

        In post-norm architecture, normalization is applied after attention and
        feedforward operations.

        NOTE(review): extra keyword arguments (including ``dac`` forwarded by
        ``forward``) are silently absorbed by ``**kwargs`` and ignored here, so
        Divide-and-Conquer attention has no effect in post-norm mode — confirm
        this is intended.

        Args:
            tgt: Input tensor to be processed
            memory: Memory tensor for cross-attention
            tgt_mask: Mask for self-attention
            memory_mask: Mask for cross-attention
            tgt_key_padding_mask: Key padding mask for self-attention
            memory_key_padding_mask: Key padding mask for cross-attention
            pos: Positional encoding for memory
            query_pos: Positional encoding for query
            **kwargs: Additional keyword arguments (ignored)

        Returns:
            Processed tensor
        """
        # The conditional applies to the whole `tgt + query_pos` expression.
        q = k = tgt + query_pos if self.pos_enc_at_attn else tgt

        # Self attention
        tgt2 = self.self_attn(
            q, k, value=tgt, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask
        )[0]
        tgt = tgt + self.dropout1(tgt2)
        tgt = self.norm1(tgt)

        # Cross attention to image
        tgt2 = self.cross_attn_image(
            query=tgt + query_pos if self.pos_enc_at_cross_attn_queries else tgt,
            key=memory + pos if self.pos_enc_at_cross_attn_keys else memory,
            value=memory,
            attn_mask=memory_mask,
            key_padding_mask=memory_key_padding_mask,
        )[0]
        tgt = tgt + self.dropout2(tgt2)
        tgt = self.norm2(tgt)

        # FFN
        tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
        tgt = tgt + self.dropout3(tgt2)
        tgt = self.norm3(tgt)
        return tgt

    def forward_pre(
        self,
        tgt: Tensor,
        memory: Tensor,
        dac: bool = False,
        tgt_mask: Optional[Tensor] = None,
        memory_mask: Optional[Tensor] = None,
        tgt_key_padding_mask: Optional[Tensor] = None,
        memory_key_padding_mask: Optional[Tensor] = None,
        pos: Optional[Tensor] = None,
        query_pos: Optional[Tensor] = None,
        # attn_bias: Optional[Tensor] = None,
        # **kwargs,
    ) -> Tensor:
        """
        Forward pass for pre-norm architecture.

        In pre-norm architecture, normalization is applied before attention and
        feedforward operations.

        Args:
            tgt: Input tensor to be processed
            memory: Memory tensor for cross-attention
            dac: Whether to use Divide-and-Conquer attention (self-attention is
                applied only to the first half of the queries; the halves are
                recombined before cross-attention)
            tgt_mask: Mask for self-attention
            memory_mask: Mask for cross-attention
            tgt_key_padding_mask: Key padding mask for self-attention
            memory_key_padding_mask: Key padding mask for cross-attention
            pos: Positional encoding for memory
            query_pos: Positional encoding for query

        Returns:
            Processed tensor
        """
        if dac:
            # we only apply self attention to the first half of the queries
            assert tgt.shape[0] % 2 == 0
            other_tgt = tgt[tgt.shape[0] // 2 :]
            tgt = tgt[: tgt.shape[0] // 2]
        tgt2 = self.norm1(tgt)
        q = k = tgt2 + query_pos if self.pos_enc_at_attn else tgt2
        tgt2 = self.self_attn(
            q, k, value=tgt2, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask
        )[0]
        tgt = tgt + self.dropout1(tgt2)
        if dac:
            # Recombine the self-attended half with the untouched half.
            tgt = torch.cat((tgt, other_tgt), dim=0)
        tgt2 = self.norm2(tgt)
        tgt2 = self.cross_attn_image(
            query=tgt2 + query_pos if self.pos_enc_at_cross_attn_queries else tgt2,
            key=memory + pos if self.pos_enc_at_cross_attn_keys else memory,
            value=memory,
            attn_mask=memory_mask,
            key_padding_mask=memory_key_padding_mask,
            # attn_bias=attn_bias,
        )[0]
        tgt = tgt + self.dropout2(tgt2)
        tgt2 = self.norm3(tgt)
        tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
        tgt = tgt + self.dropout3(tgt2)
        return tgt

    def forward(
        self,
        tgt: Tensor,
        memory: Tensor,
        dac: bool = False,
        tgt_mask: Optional[Tensor] = None,
        memory_mask: Optional[Tensor] = None,
        tgt_key_padding_mask: Optional[Tensor] = None,
        memory_key_padding_mask: Optional[Tensor] = None,
        pos: Optional[Tensor] = None,
        query_pos: Optional[Tensor] = None,
        # attn_bias: Optional[Tensor] = None,
        # **kwds: Any,
    ) -> torch.Tensor:
        """
        Forward pass for the transformer encoder layer.

        Dispatches to ``forward_pre`` or ``forward_post`` based on
        ``self.pre_norm``. Note that ``dac`` only takes effect in the pre-norm
        path; ``forward_post`` absorbs it via ``**kwargs``.

        Args:
            tgt: Input tensor to be processed
            memory: Memory tensor (e.g., image features) for cross-attention
            dac: Whether to use Divide-and-Conquer attention (only apply self-attention to first half)
            tgt_mask: Mask for self-attention
            memory_mask: Mask for cross-attention
            tgt_key_padding_mask: Key padding mask for self-attention
            memory_key_padding_mask: Key padding mask for cross-attention
            pos: Positional encoding for memory
            query_pos: Positional encoding for query

        Returns:
            Processed tensor after self-attention, cross-attention, and feedforward network
        """
        fwd_fn = self.forward_pre if self.pre_norm else self.forward_post
        return fwd_fn(
            tgt,
            memory,
            dac=dac,
            tgt_mask=tgt_mask,
            memory_mask=memory_mask,
            tgt_key_padding_mask=tgt_key_padding_mask,
            memory_key_padding_mask=memory_key_padding_mask,
            pos=pos,
            query_pos=query_pos,
            # attn_bias=attn_bias,
            # **kwds,
        )
250
+
251
+
252
class TransformerEncoder(nn.Module):
    """
    Transformer encoder that processes multi-level features.

    This encoder takes multi-level features (e.g., from a backbone network) and processes
    them through a stack of transformer encoder layers. It supports features from multiple
    levels (e.g., different resolutions) and can apply activation checkpointing for memory
    efficiency during training.

    Args:
        layer: The encoder layer to be stacked multiple times
        num_layers: Number of encoder layers to stack
        d_model: Model dimension/hidden size
        num_feature_levels: Number of feature levels to process
        frozen: Whether to freeze the parameters of this module
        use_act_checkpoint: Whether to use activation checkpointing during training
    """

    def __init__(
        self,
        layer: nn.Module,
        num_layers: int,
        d_model: int,
        num_feature_levels: int,
        frozen: bool = False,
        use_act_checkpoint: bool = False,
    ):
        super().__init__()
        self.layers = get_clones(layer, num_layers)
        self.num_layers = num_layers

        self.num_feature_levels = num_feature_levels
        # Per-level embedding is only needed when there is more than one level.
        # NOTE(review): `nn.Parameter(torch.Tensor(...))` leaves the embedding
        # uninitialized; presumably a checkpoint or external init fills it — confirm.
        self.level_embed = None
        if num_feature_levels > 1:
            self.level_embed = nn.Parameter(torch.Tensor(num_feature_levels, d_model))

        if frozen:
            for p in self.parameters():
                p.requires_grad_(False)

        self.use_act_checkpoint = use_act_checkpoint

        # assign layer index to each layer so that some layers can decide what to do
        # based on which layer index they are (e.g. cross attention to memory bank only
        # in selected layers)
        for layer_idx, layer in enumerate(self.layers):
            layer.layer_idx = layer_idx

    @staticmethod
    def get_reference_points(spatial_shapes, valid_ratios, device):
        """Build normalized per-level reference points (as in deformable attention)."""
        with torch.no_grad():
            reference_points_list = []
            for lvl, (H_, W_) in enumerate(spatial_shapes):
                # Cell-center coordinates (0.5 offset), normalized by the valid extent.
                ref_y, ref_x = torch.meshgrid(
                    torch.linspace(
                        0.5, H_ - 0.5, H_, dtype=torch.float32, device=device
                    ),
                    torch.linspace(
                        0.5, W_ - 0.5, W_, dtype=torch.float32, device=device
                    ),
                )
                ref_y = ref_y.reshape(-1)[None] / (valid_ratios[:, None, lvl, 1] * H_)
                ref_x = ref_x.reshape(-1)[None] / (valid_ratios[:, None, lvl, 0] * W_)
                ref = torch.stack((ref_x, ref_y), -1)
                reference_points_list.append(ref)
            reference_points = torch.cat(reference_points_list, 1)
            reference_points = reference_points[:, :, None] * valid_ratios[:, None]

        return reference_points

    def _prepare_multilevel_features(self, srcs, masks, pos_embeds):
        """Flatten per-level (bs, c, h, w) features into one (bs, sum(h*w), c) sequence.

        Also flattens padding masks (when present), adds per-level embeddings to
        the positional encodings, and computes level start indices and valid ratios.
        """
        assert (
            len(srcs) == self.num_feature_levels
        ), "mismatch between expected and received # of feature levels"

        src_flatten = []
        mask_flatten = []
        lvl_pos_embed_flatten = []
        spatial_shapes = []
        has_mask = masks is not None and masks[0] is not None
        for lvl, (src, mask, pos_embed) in enumerate(zip(srcs, masks, pos_embeds)):
            bs, c, h, w = src.shape
            spatial_shape = (h, w)
            spatial_shapes.append(spatial_shape)

            src = src.flatten(2).transpose(1, 2)  # bs, hw, c
            if has_mask:
                mask = mask.flatten(1)
            pos_embed = pos_embed.flatten(2).transpose(1, 2)  # bs, hw, c
            if self.level_embed is not None:
                lvl_pos_embed = pos_embed + self.level_embed[lvl].view(1, 1, -1)
            else:
                lvl_pos_embed = pos_embed
            lvl_pos_embed_flatten.append(lvl_pos_embed)
            src_flatten.append(src)
            if has_mask:
                mask_flatten.append(mask)
        src_flatten = torch.cat(src_flatten, 1)  # bs, \sum{hxw}, c
        mask_flatten = torch.cat(mask_flatten, 1) if has_mask else None  # bs, \sum{hxw}
        lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1)  # bs, \sum{hxw}, c
        spatial_shapes = torch.tensor(
            spatial_shapes, dtype=torch.long, device=src_flatten.device
        )
        # Offset of each level's first token in the flattened sequence.
        level_start_index = torch.cat(
            (
                spatial_shapes.new_zeros((1,)),
                spatial_shapes.prod(1).cumsum(0)[:-1],
            )
        )
        if has_mask:
            valid_ratios = torch.stack([get_valid_ratio(m) for m in masks], 1)
        else:
            # No padding masks: every position is valid.
            valid_ratios = torch.ones(
                (src_flatten.shape[0], self.num_feature_levels, 2),
                device=src_flatten.device,
            )

        return (
            src_flatten,
            mask_flatten,
            lvl_pos_embed_flatten,
            level_start_index,
            valid_ratios,
            spatial_shapes,
        )

    def forward(
        self,
        src: List[Tensor],
        src_key_padding_masks: Optional[List[Tensor]] = None,
        pos: Optional[List[Tensor]] = None,
        prompt: Optional[Tensor] = None,
        prompt_key_padding_mask: Optional[Tensor] = None,
        encoder_extra_kwargs: Optional[Dict] = None,
    ) -> Tuple[Tensor, Optional[Tensor], Tensor, Tensor, Tensor, Tensor]:
        """
        Process multi-level features through the transformer encoder.

        Args:
            src: List of multi-level features, each with shape (batch_size, channels, height, width)
            src_key_padding_masks: List of padding masks for each feature level, each with shape (batch_size, height, width)
            pos: List of positional embeddings for each feature level, each with shape (batch_size, channels, height, width)
            prompt: Optional text/prompt features to attend to, with shape (seq_len, batch_size, d_model)
            prompt_key_padding_mask: Optional padding mask for prompt, with shape (batch_size, seq_len)
            encoder_extra_kwargs: Optional additional arguments to pass to each encoder layer

        Returns:
            A tuple containing:
                - output: Processed features with shape (seq_len, batch_size, d_model)
                - key_padding_masks_flatten: Flattened padding masks
                - lvl_pos_embed_flatten: Flattened positional embeddings
                - level_start_index: Starting indices for each feature level
                - spatial_shapes: Spatial dimensions of each feature level
                - valid_ratios: Valid ratios for each feature level
        """
        assert (
            len(src) == self.num_feature_levels
        ), "must be equal to num_feature_levels"
        if src_key_padding_masks is not None:
            assert len(src_key_padding_masks) == self.num_feature_levels
        if pos is not None:
            assert len(pos) == self.num_feature_levels
        # Flatten multilevel feats and add level pos embeds
        (
            src_flatten,
            key_padding_masks_flatten,
            lvl_pos_embed_flatten,
            level_start_index,
            valid_ratios,
            spatial_shapes,
        ) = self._prepare_multilevel_features(src, src_key_padding_masks, pos)

        # NOTE(review): `reference_points` is computed but never used below;
        # presumably kept for subclass overrides — confirm.
        reference_points = self.get_reference_points(
            spatial_shapes, valid_ratios, device=src_flatten.device
        )

        output = src_flatten
        for layer in self.layers:
            layer_kwargs = {}

            assert isinstance(layer, TransformerEncoderLayer)
            # The image features are the queries (tgt); the prompt is attended
            # to via cross-attention (memory).
            layer_kwargs["memory"] = prompt
            layer_kwargs["memory_key_padding_mask"] = prompt_key_padding_mask
            layer_kwargs["query_pos"] = lvl_pos_embed_flatten
            layer_kwargs["tgt"] = output
            layer_kwargs["tgt_key_padding_mask"] = key_padding_masks_flatten

            if self.training:
                assert self.use_act_checkpoint, "activation ckpt not enabled in encoder"
            if encoder_extra_kwargs is not None:
                layer_kwargs.update(encoder_extra_kwargs)
            # Checkpointing is only active in training mode.
            output = activation_ckpt_wrapper(layer)(
                **layer_kwargs,
                act_ckpt_enable=self.training and self.use_act_checkpoint,
            )
        # return as seq first
        return (
            output.transpose(0, 1),
            (
                key_padding_masks_flatten.transpose(0, 1)
                if key_padding_masks_flatten is not None
                else None
            ),
            lvl_pos_embed_flatten.transpose(0, 1),
            level_start_index,
            spatial_shapes,
            valid_ratios,
        )
460
+
461
+
462
class TransformerEncoderFusion(TransformerEncoder):
    """
    Transformer encoder that fuses text and image features.

    This encoder extends TransformerEncoder to handle both text and image features,
    with the ability to add pooled text features to image features for better
    cross-modal fusion. It supports torch.compile for performance optimization.

    Args:
        layer: The encoder layer to be stacked multiple times
        num_layers: Number of encoder layers to stack
        d_model: Model dimension/hidden size
        num_feature_levels: Number of feature levels to process
        add_pooled_text_to_img_feat: Whether to add pooled text features to image features
        pool_text_with_mask: Whether to use the mask when pooling text features
        compile_mode: Mode for torch.compile, or None to disable compilation
        **kwargs: Additional arguments to pass to the parent class
    """

    def __init__(
        self,
        layer: nn.Module,
        num_layers: int,
        d_model: int,
        num_feature_levels: int,
        add_pooled_text_to_img_feat: bool = True,
        pool_text_with_mask: bool = False,
        compile_mode: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(
            layer,
            num_layers,
            d_model,
            num_feature_levels,
            **kwargs,
        )
        self.add_pooled_text_to_img_feat = add_pooled_text_to_img_feat
        if self.add_pooled_text_to_img_feat:
            self.text_pooling_proj = nn.Linear(d_model, d_model)
        self.pool_text_with_mask = pool_text_with_mask
        if compile_mode is not None:
            self.forward = torch.compile(
                self.forward, mode=compile_mode, fullgraph=True
            )

    @staticmethod
    def get_reference_points(spatial_shapes, valid_ratios, device):
        # Reference points are not used by the fusion encoder.
        return None

    def forward(
        self,
        src: List[Tensor],
        prompt: Tensor,
        src_key_padding_mask: Optional[List[Tensor]] = None,
        src_pos: Optional[List[Tensor]] = None,
        prompt_key_padding_mask: Optional[Tensor] = None,
        prompt_pos: Optional[Tensor] = None,
        feat_sizes: Optional[List[int]] = None,
        encoder_extra_kwargs: Optional[Dict] = None,
    ):
        """
        Fuse text features into image features and run the encoder stack.

        Args:
            src: Per-level image features; sequence-first (seq, bs, dim) when
                `feat_sizes` is given, otherwise (bs, c, h, w) tensors.
            prompt: Text features, sequence-first (seq, bs, dim).
            src_key_padding_mask: Optional per-level padding masks.
            src_pos: Optional per-level positional embeddings (same layout as `src`).
            prompt_key_padding_mask: Optional (bs, seq) mask; True marks padding.
            prompt_pos: Unused here; accepted for interface compatibility.
            feat_sizes: Optional per-level (h, w) sizes used to restore spatial shape.
            encoder_extra_kwargs: Extra kwargs forwarded to each encoder layer.

        Returns:
            Dict with the encoded memory, masks, positional embeddings, level
            bookkeeping tensors, and the (unchanged) text features.
        """
        # Restore spatial shapes of vision
        bs = src[0].shape[1]  # seq first
        if feat_sizes is not None:
            assert len(feat_sizes) == len(src)
            if src_key_padding_mask is None:
                src_key_padding_mask = [None] * len(src)
            for i, (h, w) in enumerate(feat_sizes):
                src[i] = src[i].reshape(h, w, bs, -1).permute(2, 3, 0, 1)
                src_pos[i] = src_pos[i].reshape(h, w, bs, -1).permute(2, 3, 0, 1)
                src_key_padding_mask[i] = (
                    src_key_padding_mask[i].reshape(h, w, bs).permute(2, 0, 1)
                    if src_key_padding_mask[i] is not None
                    else None
                )
        else:
            # BUGFIX: previously `x.dim == 4` compared the bound method object
            # to 4, which is always False, so this assertion failed whenever
            # `feat_sizes` was None. `dim` must be called.
            assert all(
                x.dim() == 4 for x in src
            ), "expected list of (bs, c, h, w) tensors"

        if self.add_pooled_text_to_img_feat:
            # Fusion: Add mean pooled text to image features.
            pooled_text = pool_text_feat(
                prompt, prompt_key_padding_mask, self.pool_text_with_mask
            )
            pooled_text = self.text_pooling_proj(pooled_text)[
                ..., None, None
            ]  # prompt is seq first
            # NOTE: `add_` mutates the incoming feature tensors in place.
            src = [x.add_(pooled_text) for x in src]

        (
            out,
            key_padding_masks_flatten,
            lvl_pos_embed_flatten,
            level_start_index,
            spatial_shapes,
            valid_ratios,
        ) = super().forward(
            src,
            src_key_padding_masks=src_key_padding_mask,
            pos=src_pos,
            prompt=prompt.transpose(0, 1),
            prompt_key_padding_mask=prompt_key_padding_mask,
            encoder_extra_kwargs=encoder_extra_kwargs,
        )

        return {
            "memory": out,
            "padding_mask": key_padding_masks_flatten,
            "pos_embed": lvl_pos_embed_flatten,
            "memory_text": prompt,
            "level_start_index": level_start_index,
            "spatial_shapes": spatial_shapes,
            "valid_ratios": valid_ratios,
        }
578
+
579
+
580
def pool_text_feat(prompt, prompt_mask, pool_with_mask):
    """
    Mean-pool text features over the sequence dimension.

    Args:
        prompt: Text features of shape (seq, bs, dim).
        prompt_mask: Padding mask of shape (bs, seq); False is valid, True is padding.
            Only consulted when ``pool_with_mask`` is True.
        pool_with_mask: When True, average only over non-padded tokens.

    Returns:
        Pooled features of shape (bs, dim).
    """
    if not pool_with_mask:
        return prompt.mean(dim=0)

    assert prompt_mask.dim() == 2
    # (bs, seq) -> (seq, bs, 1): 1.0 for valid tokens, 0.0 for padding.
    valid = (~prompt_mask).float().permute(1, 0).unsqueeze(-1)
    # Guard against fully-padded sequences (avoid division by zero).
    denom = torch.clamp(valid.sum(dim=0), min=1.0)

    # Mean pool over all the valid tokens only.
    return (prompt * valid).sum(dim=0) / denom
source_code/sam3/sam3/model/maskformer_segmentation.py ADDED
@@ -0,0 +1,323 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
2
+
3
+ import math
4
+ from typing import Dict, List, Optional
5
+
6
+ import torch
7
+ import torch.nn as nn
8
+ import torch.nn.functional as F
9
+ import torch.utils.checkpoint as checkpoint
10
+
11
+ from .model_misc import MLP
12
+
13
+
14
class LinearPresenceHead(nn.Sequential):
    """Single-logit presence head applied to decoder hidden states.

    The ``prompt`` / ``prompt_mask`` arguments are accepted so the head is
    call-compatible with dot-product scorers, but they are ignored.
    """

    def __init__(self, d_model):
        # The two leading Identity modules keep submodule indices aligned
        # with older checkpoints so their state dicts still load.
        layers = (nn.Identity(), nn.Identity(), nn.Linear(d_model, 1))
        super().__init__(*layers)

    def forward(self, hs, prompt, prompt_mask):
        # Only the hidden states are consumed; the text inputs are unused.
        return super().forward(hs)
21
+
22
+
23
class MaskPredictor(nn.Module):
    """Predicts mask logits by correlating object queries with pixel embeddings.

    Queries are first projected by a small MLP into the pixel-embedding
    space; masks are then the per-query dot product with every pixel.
    """

    def __init__(self, hidden_dim, mask_dim):
        super().__init__()
        # 3-layer MLP mapping query features (hidden_dim) to mask_dim.
        self.mask_embed = MLP(hidden_dim, hidden_dim, mask_dim, 3)

    def forward(self, obj_queries, pixel_embed):
        """Compute mask logits.

        Args:
            obj_queries: (bs, q, c), or (layers, bs, q, c) when auxiliary
                per-decoder-layer masks are requested.
            pixel_embed: (bs, c, h, w), or (c, h, w) when the batch dim is
                omitted and broadcast across the batch.

        Returns:
            Mask logits of shape (bs, q, h, w) or (layers, bs, q, h, w).
        """
        embedded = self.mask_embed(obj_queries)
        has_aux_axis = obj_queries.dim() != 3  # leading decoder-layer axis
        shared_pixels = pixel_embed.ndim == 3  # batch size was omitted
        if has_aux_axis:
            equation = "lbqc,chw->lbqhw" if shared_pixels else "lbqc,bchw->lbqhw"
        else:
            equation = "bqc,chw->bqhw" if shared_pixels else "bqc,bchw->bqhw"
        return torch.einsum(equation, embedded, pixel_embed)
52
+
53
+
54
class SegmentationHead(nn.Module):
    """Instance-mask head: decodes per-pixel embeddings from backbone
    features and correlates them with object queries to produce masks.

    Args:
        hidden_dim: channel width shared by backbone features and queries.
        upsampling_stages: number of FPN fusion stages for the default
            ``PixelDecoder`` (ignored when ``pixel_decoder`` is supplied).
        use_encoder_inputs: if True, splice the reshaped encoder hidden
            states into the coarsest backbone level before pixel decoding.
        aux_masks: if True, predict masks for every decoder layer
            (queries then carry a leading layer axis).
        no_dec: if True, skip query correlation entirely and predict a
            single-channel mask directly from the pixel embedding.
        pixel_decoder: optional externally constructed pixel decoder.
        act_ckpt: if True, run the pixel decoder under activation
            checkpointing to save memory.
        shared_conv: forwarded to the default ``PixelDecoder``.
        compile_mode_pixel_decoder: optional ``torch.compile`` mode for
            the default ``PixelDecoder``.
    """

    def __init__(
        self,
        hidden_dim,
        upsampling_stages,
        use_encoder_inputs=False,
        aux_masks=False,
        no_dec=False,
        pixel_decoder=None,
        act_ckpt=False,
        shared_conv=False,
        compile_mode_pixel_decoder=None,
    ):
        super().__init__()
        self.use_encoder_inputs = use_encoder_inputs
        self.aux_masks = aux_masks
        if pixel_decoder is not None:
            self.pixel_decoder = pixel_decoder
        else:
            self.pixel_decoder = PixelDecoder(
                hidden_dim,
                upsampling_stages,
                shared_conv=shared_conv,
                compile_mode=compile_mode_pixel_decoder,
            )
        self.no_dec = no_dec
        if no_dec:
            # Query-free variant: a plain conv maps pixel embeddings to a
            # single mask channel.
            self.mask_predictor = nn.Conv2d(
                hidden_dim, 1, kernel_size=3, stride=1, padding=1
            )
        else:
            self.mask_predictor = MaskPredictor(hidden_dim, mask_dim=hidden_dim)

        self.act_ckpt = act_ckpt

        # used to update the output dictionary
        self.instance_keys = ["pred_masks"]

    @property
    def device(self):
        # Cache the device: next(self.parameters()) is relatively expensive
        # and the device rarely changes.
        self._device = getattr(self, "_device", None) or next(self.parameters()).device
        return self._device

    def to(self, *args, **kwargs):
        # clear cached _device in case the model is moved to a different device
        self._device = None
        return super().to(*args, **kwargs)

    def _embed_pixels(
        self,
        backbone_feats: List[torch.Tensor],
        image_ids,
        encoder_hidden_states,
    ) -> torch.Tensor:
        """Run the pixel decoder and return per-query pixel embeddings.

        ``image_ids`` maps each query batch entry to its source image so
        features can be gathered (or broadcast when batch size is 1).
        """
        feature_device = backbone_feats[0].device  # features could be on CPU
        model_device = self.device
        image_ids_ = image_ids.to(feature_device)
        if self.use_encoder_inputs:
            if backbone_feats[0].shape[0] > 1:
                # For bs > 1, we construct the per query backbone features
                backbone_visual_feats = []
                for feat in backbone_feats:
                    # Copy the img features per query (pixel decoder won't share img feats)
                    backbone_visual_feats.append(feat[image_ids_, ...].to(model_device))
            else:
                # Bs=1, we rely on broadcasting for query-based processing
                backbone_visual_feats = [bb_feat.clone() for bb_feat in backbone_feats]
            # Extract visual embeddings: encoder output is sequence-first;
            # the first `spatial_dim` tokens correspond to the coarsest
            # backbone level and are reshaped back to (N, C, H, W).
            encoder_hidden_states = encoder_hidden_states.permute(1, 2, 0)
            spatial_dim = math.prod(backbone_feats[-1].shape[-2:])
            encoder_visual_embed = encoder_hidden_states[..., :spatial_dim].reshape(
                -1, *backbone_feats[-1].shape[1:]
            )

            # Replace the coarsest level with the encoder's visual tokens.
            backbone_visual_feats[-1] = encoder_visual_embed
            if self.act_ckpt:
                pixel_embed = checkpoint.checkpoint(
                    self.pixel_decoder, backbone_visual_feats, use_reentrant=False
                )
            else:
                pixel_embed = self.pixel_decoder(backbone_visual_feats)
        else:
            backbone_feats = [x.to(model_device) for x in backbone_feats]
            pixel_embed = self.pixel_decoder(backbone_feats)
            if pixel_embed.shape[0] == 1:
                # For batch_size=1 training, we can avoid the indexing to save memory
                pixel_embed = pixel_embed.squeeze(0)
            else:
                pixel_embed = pixel_embed[image_ids, ...]
        return pixel_embed

    def forward(
        self,
        backbone_feats: List[torch.Tensor],
        obj_queries: torch.Tensor,
        image_ids,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        **kwargs,
    ) -> Dict[str, torch.Tensor]:
        """Predict instance masks.

        Returns:
            Dict with key ``"pred_masks"``: mask logits from the last
            decoder layer, or from all layers when ``aux_masks`` is set.
        """
        if self.use_encoder_inputs:
            assert encoder_hidden_states is not None

        pixel_embed = self._embed_pixels(
            backbone_feats=backbone_feats,
            image_ids=image_ids,
            encoder_hidden_states=encoder_hidden_states,
        )

        if self.no_dec:
            mask_pred = self.mask_predictor(pixel_embed)
        elif self.aux_masks:
            mask_pred = self.mask_predictor(obj_queries, pixel_embed)
        else:
            # Only the last decoder layer's queries are used.
            mask_pred = self.mask_predictor(obj_queries[-1], pixel_embed)

        return {"pred_masks": mask_pred}
170
+
171
+
172
class PixelDecoder(nn.Module):
    """FPN-style decoder: repeatedly upsamples the coarsest feature map,
    fuses it with the next finer backbone level, and refines the sum with
    a conv + GroupNorm + ReLU stage."""

    def __init__(
        self,
        hidden_dim,
        num_upsampling_stages,
        interpolation_mode="nearest",
        shared_conv=False,
        compile_mode=None,
    ):
        super().__init__()
        self.hidden_dim = hidden_dim
        self.num_upsampling_stages = num_upsampling_stages
        self.interpolation_mode = interpolation_mode
        # With shared_conv a single conv/norm pair is reused at every stage.
        num_convs = 1 if shared_conv else num_upsampling_stages
        self.conv_layers = nn.ModuleList(
            nn.Conv2d(hidden_dim, hidden_dim, 3, 1, 1) for _ in range(num_convs)
        )
        self.norms = nn.ModuleList(
            nn.GroupNorm(8, hidden_dim) for _ in range(num_convs)
        )
        self.shared_conv = shared_conv
        self.out_dim = self.conv_layers[-1].out_channels
        if compile_mode is not None:
            self.forward = torch.compile(
                self.forward, mode=compile_mode, dynamic=True, fullgraph=True
            )
            # Needed to make checkpointing happy. But we don't know if the module is checkpointed, so we disable it by default.
            torch._dynamo.config.optimize_ddp = False

    def forward(self, backbone_feats: List[torch.Tensor]):
        # Assumes backbone features are already projected (C == hidden dim).
        fused = backbone_feats[-1]
        # Walk the remaining levels from coarse to fine.
        for stage, skip_feat in enumerate(reversed(backbone_feats[:-1])):
            upsampled = F.interpolate(
                fused, size=skip_feat.shape[-2:], mode=self.interpolation_mode
            )
            fused = skip_feat + upsampled
            conv_idx = 0 if self.shared_conv else stage
            fused = self.conv_layers[conv_idx](fused)
            fused = F.relu(self.norms[conv_idx](fused))

        return fused
220
+
221
+
222
class UniversalSegmentationHead(SegmentationHead):
    """This module handles semantic+instance segmentation"""

    def __init__(
        self,
        hidden_dim,
        upsampling_stages,
        pixel_decoder,
        aux_masks=False,
        no_dec=False,
        act_ckpt=False,
        presence_head: bool = False,
        dot_product_scorer=None,
        cross_attend_prompt=None,
    ):
        """
        Args:
            hidden_dim: channel width shared by features and queries.
            upsampling_stages: number of FPN stages (forwarded to base).
            pixel_decoder: pixel decoder module (required here, unlike base).
            aux_masks: predict masks for all decoder layers.
            no_dec: predict masks directly from pixel embeddings, no queries.
            act_ckpt: activation-checkpoint the pixel decoder.
            presence_head: if True, also predict a per-image presence logit
                from the mean-pooled encoder states.
            dot_product_scorer: optional module replacing the default
                ``LinearPresenceHead`` (requires ``presence_head=True``).
            cross_attend_prompt: optional attention module letting encoder
                states cross-attend to the text prompt before mask decoding.
        """
        super().__init__(
            hidden_dim=hidden_dim,
            upsampling_stages=upsampling_stages,
            use_encoder_inputs=True,
            aux_masks=aux_masks,
            no_dec=no_dec,
            pixel_decoder=pixel_decoder,
            act_ckpt=act_ckpt,
        )
        self.d_model = hidden_dim

        if dot_product_scorer is not None:
            assert presence_head, "Specifying a dot product scorer without a presence head is likely a mistake"

        self.presence_head = None
        if presence_head:
            self.presence_head = (
                dot_product_scorer
                if dot_product_scorer is not None
                else LinearPresenceHead(self.d_model)
            )

        self.cross_attend_prompt = cross_attend_prompt
        if self.cross_attend_prompt is not None:
            # Pre-norm for the residual cross-attention branch.
            self.cross_attn_norm = nn.LayerNorm(self.d_model)

        # 1x1 convs mapping pixel embeddings to a semantic-seg channel and
        # to instance-embedding space (correlated with queries later).
        self.semantic_seg_head = nn.Conv2d(self.pixel_decoder.out_dim, 1, kernel_size=1)
        self.instance_seg_head = nn.Conv2d(
            self.pixel_decoder.out_dim, self.d_model, kernel_size=1
        )

    def forward(
        self,
        backbone_feats: List[torch.Tensor],
        obj_queries: torch.Tensor,
        image_ids,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        prompt: Optional[torch.Tensor] = None,
        prompt_mask: Optional[torch.Tensor] = None,
        **kwargs,
    ) -> Dict[str, Optional[torch.Tensor]]:
        """Predict instance masks, a semantic-seg map, and (optionally) a
        presence logit.

        Returns:
            Dict with keys ``"pred_masks"``, ``"semantic_seg"`` and
            ``"presence_logit"`` (None when no presence head is configured).
        """
        assert encoder_hidden_states is not None
        # encoder_hidden_states is sequence-first: (seq, bs, d_model).
        bs = encoder_hidden_states.shape[1]

        if self.cross_attend_prompt is not None:
            # Residual cross-attention from encoder states to the prompt.
            tgt2 = self.cross_attn_norm(encoder_hidden_states)
            tgt2 = self.cross_attend_prompt(
                query=tgt2,
                key=prompt,
                value=prompt,
                key_padding_mask=prompt_mask,
            )[0]
            encoder_hidden_states = tgt2 + encoder_hidden_states

        presence_logit = None
        if self.presence_head is not None:
            # Mean-pool over the sequence dim, then score against the prompt.
            pooled_enc = encoder_hidden_states.mean(0)
            presence_logit = (
                self.presence_head(
                    pooled_enc.view(1, bs, 1, self.d_model),
                    prompt=prompt,
                    prompt_mask=prompt_mask,
                )
                .squeeze(0)
                .squeeze(1)
            )

        pixel_embed = self._embed_pixels(
            backbone_feats=backbone_feats,
            image_ids=image_ids,
            encoder_hidden_states=encoder_hidden_states,
        )

        instance_embeds = self.instance_seg_head(pixel_embed)

        if self.no_dec:
            mask_pred = self.mask_predictor(instance_embeds)
        elif self.aux_masks:
            mask_pred = self.mask_predictor(obj_queries, instance_embeds)
        else:
            # Only the last decoder layer's queries are used.
            mask_pred = self.mask_predictor(obj_queries[-1], instance_embeds)

        return {
            "pred_masks": mask_pred,
            "semantic_seg": self.semantic_seg_head(pixel_embed),
            "presence_logit": presence_logit,
        }
source_code/sam3/sam3/model/sam1_task_predictor.py ADDED
@@ -0,0 +1,458 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
2
+ # All rights reserved.
3
+
4
+ # This source code is licensed under the license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ import logging
8
+
9
+ from typing import List, Optional, Tuple, Union
10
+
11
+ import numpy as np
12
+ import torch
13
+
14
+ import torch.nn as nn
15
+ from PIL.Image import Image
16
+
17
+ from sam3.model.sam3_tracker_base import Sam3TrackerBase
18
+ from sam3.model.utils.sam1_utils import SAM2Transforms
19
+
20
+
21
+ # Adapted from https://github.com/facebookresearch/sam2/blob/main/sam2/sam2_image_predictor.py
22
# Adapted from https://github.com/facebookresearch/sam2/blob/main/sam2/sam2_image_predictor.py
class SAM3InteractiveImagePredictor(nn.Module):
    def __init__(
        self,
        sam_model: Sam3TrackerBase,
        mask_threshold=0.0,
        max_hole_area=256.0,
        max_sprinkle_area=0.0,
        **kwargs,
    ) -> None:
        """
        Uses SAM-3 to calculate the image embedding for an image, and then
        allow repeated, efficient mask prediction given prompts.

        Arguments:
          sam_model : The model to use for mask prediction.
          mask_threshold (float): The threshold to use when converting mask logits
            to binary masks. Masks are thresholded at 0 by default.
          max_hole_area (int): If max_hole_area > 0, we fill small holes in up to
            the maximum area of max_hole_area in low_res_masks.
          max_sprinkle_area (int): If max_sprinkle_area > 0, we remove small sprinkles up to
            the maximum area of max_sprinkle_area in low_res_masks.
        """
        super().__init__()
        self.model = sam_model
        self._transforms = SAM2Transforms(
            resolution=self.model.image_size,
            mask_threshold=mask_threshold,
            max_hole_area=max_hole_area,
            max_sprinkle_area=max_sprinkle_area,
        )

        # Predictor state
        self._is_image_set = False
        self._features = None
        self._orig_hw = None
        # Whether the predictor is set for single image or a batch of images
        self._is_batch = False

        # Predictor config
        self.mask_threshold = mask_threshold

        # Spatial dim for backbone feature maps, finest to coarsest
        # (fixed for the model's expected input resolution).
        self._bb_feat_sizes = [
            (288, 288),
            (144, 144),
            (72, 72),
        ]

    @torch.no_grad()
    def set_image(
        self,
        image: Union[np.ndarray, Image],
    ) -> None:
        """
        Calculates the image embeddings for the provided image, allowing
        masks to be predicted with the 'predict' method.

        Arguments:
          image (np.ndarray or PIL Image): The input image to embed in RGB format. The image should be in HWC format if np.ndarray, or WHC format if PIL Image
          with pixel values in [0, 255].
          image_format (str): The color format of the image, in ['RGB', 'BGR'].
        """
        self.reset_predictor()
        # Transform the image to the form expected by the model
        if isinstance(image, np.ndarray):
            logging.info("For numpy array image, we assume (HxWxC) format")
            self._orig_hw = [image.shape[:2]]
        elif isinstance(image, Image):
            w, h = image.size
            self._orig_hw = [(h, w)]
        else:
            raise NotImplementedError("Image format not supported")

        input_image = self._transforms(image)
        input_image = input_image[None, ...].to(self.device)

        assert (
            len(input_image.shape) == 4 and input_image.shape[1] == 3
        ), f"input_image must be of size 1x3xHxW, got {input_image.shape}"
        logging.info("Computing image embeddings for the provided image...")
        backbone_out = self.model.forward_image(input_image)
        (
            _,
            vision_feats,
            _,
            _,
        ) = self.model._prepare_backbone_features(backbone_out)
        # Add no_mem_embed, which is added to the lowest rest feat. map during training on videos
        vision_feats[-1] = vision_feats[-1] + self.model.no_mem_embed

        # Reshape flattened (HW, B, C) features back to (B, C, H, W)
        # per level, finest-to-coarsest order preserved.
        feats = [
            feat.permute(1, 2, 0).view(1, -1, *feat_size)
            for feat, feat_size in zip(vision_feats[::-1], self._bb_feat_sizes[::-1])
        ][::-1]
        self._features = {"image_embed": feats[-1], "high_res_feats": feats[:-1]}
        self._is_image_set = True
        logging.info("Image embeddings computed.")

    @torch.no_grad()
    def set_image_batch(
        self,
        image_list: List[np.ndarray],
    ) -> None:
        """
        Calculates the image embeddings for the provided image batch, allowing
        masks to be predicted with the 'predict_batch' method.

        Arguments:
          image_list (List[np.ndarray]): The input images to embed in RGB format. The image should be in HWC format if np.ndarray
          with pixel values in [0, 255].
        """
        self.reset_predictor()
        assert isinstance(image_list, list)
        self._orig_hw = []
        for image in image_list:
            assert isinstance(
                image, np.ndarray
            ), "Images are expected to be an np.ndarray in RGB format, and of shape HWC"
            self._orig_hw.append(image.shape[:2])
        # Transform the image to the form expected by the model
        img_batch = self._transforms.forward_batch(image_list)
        img_batch = img_batch.to(self.device)
        batch_size = img_batch.shape[0]
        assert (
            len(img_batch.shape) == 4 and img_batch.shape[1] == 3
        ), f"img_batch must be of size Bx3xHxW, got {img_batch.shape}"
        logging.info("Computing image embeddings for the provided images...")
        backbone_out = self.model.forward_image(img_batch)
        (
            _,
            vision_feats,
            _,
            _,
        ) = self.model._prepare_backbone_features(backbone_out)
        # Add no_mem_embed, which is added to the lowest rest feat. map during training on videos
        vision_feats[-1] = vision_feats[-1] + self.model.no_mem_embed

        # Reshape flattened (HW, B, C) features back to (B, C, H, W) per level.
        feats = [
            feat.permute(1, 2, 0).view(batch_size, -1, *feat_size)
            for feat, feat_size in zip(vision_feats[::-1], self._bb_feat_sizes[::-1])
        ][::-1]
        self._features = {"image_embed": feats[-1], "high_res_feats": feats[:-1]}
        self._is_image_set = True
        self._is_batch = True
        logging.info("Image embeddings computed.")

    def predict_batch(
        self,
        point_coords_batch: Optional[List[np.ndarray]] = None,
        point_labels_batch: Optional[List[np.ndarray]] = None,
        box_batch: Optional[List[np.ndarray]] = None,
        mask_input_batch: Optional[List[np.ndarray]] = None,
        multimask_output: bool = True,
        return_logits: bool = False,
        normalize_coords=True,
    ) -> Tuple[List[np.ndarray], List[np.ndarray], List[np.ndarray]]:
        """This function is very similar to predict(...), however it is used for batched mode, when the model is expected to generate predictions on multiple images.
        It returns a tuple of lists of masks, ious, and low_res_masks_logits.
        """
        assert self._is_batch, "This function should only be used when in batched mode"
        if not self._is_image_set:
            raise RuntimeError(
                "An image must be set with .set_image_batch(...) before mask prediction."
            )
        num_images = len(self._features["image_embed"])
        all_masks = []
        all_ious = []
        all_low_res_masks = []
        # Prompts are processed per-image; each image may have its own
        # (possibly absent) points / boxes / mask inputs.
        for img_idx in range(num_images):
            # Transform input prompts
            point_coords = (
                point_coords_batch[img_idx] if point_coords_batch is not None else None
            )
            point_labels = (
                point_labels_batch[img_idx] if point_labels_batch is not None else None
            )
            box = box_batch[img_idx] if box_batch is not None else None
            mask_input = (
                mask_input_batch[img_idx] if mask_input_batch is not None else None
            )
            mask_input, unnorm_coords, labels, unnorm_box = self._prep_prompts(
                point_coords,
                point_labels,
                box,
                mask_input,
                normalize_coords,
                img_idx=img_idx,
            )
            masks, iou_predictions, low_res_masks = self._predict(
                unnorm_coords,
                labels,
                unnorm_box,
                mask_input,
                multimask_output,
                return_logits=return_logits,
                img_idx=img_idx,
            )
            masks_np = masks.squeeze(0).float().detach().cpu().numpy()
            iou_predictions_np = (
                iou_predictions.squeeze(0).float().detach().cpu().numpy()
            )
            low_res_masks_np = low_res_masks.squeeze(0).float().detach().cpu().numpy()
            all_masks.append(masks_np)
            all_ious.append(iou_predictions_np)
            all_low_res_masks.append(low_res_masks_np)

        return all_masks, all_ious, all_low_res_masks

    def predict(
        self,
        point_coords: Optional[np.ndarray] = None,
        point_labels: Optional[np.ndarray] = None,
        box: Optional[np.ndarray] = None,
        mask_input: Optional[np.ndarray] = None,
        multimask_output: bool = True,
        return_logits: bool = False,
        normalize_coords=True,
    ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
        """
        Predict masks for the given input prompts, using the currently set image.

        Arguments:
          point_coords (np.ndarray or None): A Nx2 array of point prompts to the
            model. Each point is in (X,Y) in pixels.
          point_labels (np.ndarray or None): A length N array of labels for the
            point prompts. 1 indicates a foreground point and 0 indicates a
            background point.
          box (np.ndarray or None): A length 4 array given a box prompt to the
            model, in XYXY format.
          mask_input (np.ndarray): A low resolution mask input to the model, typically
            coming from a previous prediction iteration. Has form 1xHxW, where
            for SAM, H=W=256.
          multimask_output (bool): If true, the model will return three masks.
            For ambiguous input prompts (such as a single click), this will often
            produce better masks than a single prediction. If only a single
            mask is needed, the model's predicted quality score can be used
            to select the best mask. For non-ambiguous prompts, such as multiple
            input prompts, multimask_output=False can give better results.
          return_logits (bool): If true, returns un-thresholded masks logits
            instead of a binary mask.
          normalize_coords (bool): If true, the point coordinates will be normalized to the range [0,1] and point_coords is expected to be wrt. image dimensions.

        Returns:
          (np.ndarray): The output masks in CxHxW format, where C is the
            number of masks, and (H, W) is the original image size.
          (np.ndarray): An array of length C containing the model's
            predictions for the quality of each mask.
          (np.ndarray): An array of shape CxHxW, where C is the number
            of masks and H=W=256. These low resolution logits can be passed to
            a subsequent iteration as mask input.
        """
        if not self._is_image_set:
            raise RuntimeError(
                "An image must be set with .set_image(...) before mask prediction."
            )

        # Transform input prompts

        mask_input, unnorm_coords, labels, unnorm_box = self._prep_prompts(
            point_coords, point_labels, box, mask_input, normalize_coords
        )

        masks, iou_predictions, low_res_masks = self._predict(
            unnorm_coords,
            labels,
            unnorm_box,
            mask_input,
            multimask_output,
            return_logits=return_logits,
        )

        masks_np = masks.squeeze(0).float().detach().cpu().numpy()
        iou_predictions_np = iou_predictions.squeeze(0).float().detach().cpu().numpy()
        low_res_masks_np = low_res_masks.squeeze(0).float().detach().cpu().numpy()
        return masks_np, iou_predictions_np, low_res_masks_np

    def _prep_prompts(
        self, point_coords, point_labels, box, mask_logits, normalize_coords, img_idx=-1
    ):
        """Convert numpy prompts to model-frame torch tensors.

        Returns (mask_input, unnorm_coords, labels, unnorm_box); each entry
        is None when the corresponding prompt was not supplied.
        """
        unnorm_coords, labels, unnorm_box, mask_input = None, None, None, None
        if point_coords is not None:
            assert (
                point_labels is not None
            ), "point_labels must be supplied if point_coords is supplied."
            point_coords = torch.as_tensor(
                point_coords, dtype=torch.float, device=self.device
            )
            unnorm_coords = self._transforms.transform_coords(
                point_coords, normalize=normalize_coords, orig_hw=self._orig_hw[img_idx]
            )
            labels = torch.as_tensor(point_labels, dtype=torch.int, device=self.device)
            if len(unnorm_coords.shape) == 2:
                # Add a batch dimension for a single set of points.
                unnorm_coords, labels = unnorm_coords[None, ...], labels[None, ...]
        if box is not None:
            box = torch.as_tensor(box, dtype=torch.float, device=self.device)
            unnorm_box = self._transforms.transform_boxes(
                box, normalize=normalize_coords, orig_hw=self._orig_hw[img_idx]
            )  # Bx2x2
        if mask_logits is not None:
            mask_input = torch.as_tensor(
                mask_logits, dtype=torch.float, device=self.device
            )
            if len(mask_input.shape) == 3:
                # Add a batch dimension: 1xHxW -> 1x1xHxW.
                mask_input = mask_input[None, :, :, :]
        return mask_input, unnorm_coords, labels, unnorm_box

    @torch.no_grad()
    def _predict(
        self,
        point_coords: Optional[torch.Tensor],
        point_labels: Optional[torch.Tensor],
        boxes: Optional[torch.Tensor] = None,
        mask_input: Optional[torch.Tensor] = None,
        multimask_output: bool = True,
        return_logits: bool = False,
        img_idx: int = -1,
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
        """
        Predict masks for the given input prompts, using the currently set image.
        Input prompts are batched torch tensors and are expected to already be
        transformed to the input frame using SAM2Transforms.

        Arguments:
          point_coords (torch.Tensor or None): A BxNx2 array of point prompts to the
            model. Each point is in (X,Y) in pixels.
          point_labels (torch.Tensor or None): A BxN array of labels for the
            point prompts. 1 indicates a foreground point and 0 indicates a
            background point.
          boxes (np.ndarray or None): A Bx4 array given a box prompt to the
            model, in XYXY format.
          mask_input (np.ndarray): A low resolution mask input to the model, typically
            coming from a previous prediction iteration. Has form Bx1xHxW, where
            for SAM, H=W=256. Masks returned by a previous iteration of the
            predict method do not need further transformation.
          multimask_output (bool): If true, the model will return three masks.
            For ambiguous input prompts (such as a single click), this will often
            produce better masks than a single prediction. If only a single
            mask is needed, the model's predicted quality score can be used
            to select the best mask. For non-ambiguous prompts, such as multiple
            input prompts, multimask_output=False can give better results.
          return_logits (bool): If true, returns un-thresholded masks logits
            instead of a binary mask.

        Returns:
          (torch.Tensor): The output masks in BxCxHxW format, where C is the
            number of masks, and (H, W) is the original image size.
          (torch.Tensor): An array of shape BxC containing the model's
            predictions for the quality of each mask.
          (torch.Tensor): An array of shape BxCxHxW, where C is the number
            of masks and H=W=256. These low res logits can be passed to
            a subsequent iteration as mask input.
        """
        if not self._is_image_set:
            raise RuntimeError(
                "An image must be set with .set_image(...) before mask prediction."
            )

        if point_coords is not None:
            concat_points = (point_coords, point_labels)
        else:
            concat_points = None

        # Embed prompts
        if boxes is not None:
            # A box is encoded as two corner points with labels 2 (top-left)
            # and 3 (bottom-right), matching the SAM prompt-encoder convention.
            box_coords = boxes.reshape(-1, 2, 2)
            box_labels = torch.tensor([[2, 3]], dtype=torch.int, device=boxes.device)
            box_labels = box_labels.repeat(boxes.size(0), 1)
            # we merge "boxes" and "points" into a single "concat_points" input (where
            # boxes are added at the beginning) to sam_prompt_encoder
            if concat_points is not None:
                concat_coords = torch.cat([box_coords, concat_points[0]], dim=1)
                concat_labels = torch.cat([box_labels, concat_points[1]], dim=1)
                concat_points = (concat_coords, concat_labels)
            else:
                concat_points = (box_coords, box_labels)

        sparse_embeddings, dense_embeddings = self.model.sam_prompt_encoder(
            points=concat_points,
            boxes=None,
            masks=mask_input,
        )

        # Predict masks
        batched_mode = (
            concat_points is not None and concat_points[0].shape[0] > 1
        )  # multi object prediction
        high_res_features = [
            feat_level[img_idx].unsqueeze(0)
            for feat_level in self._features["high_res_feats"]
        ]
        low_res_masks, iou_predictions, _, _ = self.model.sam_mask_decoder(
            image_embeddings=self._features["image_embed"][img_idx].unsqueeze(0),
            image_pe=self.model.sam_prompt_encoder.get_dense_pe(),
            sparse_prompt_embeddings=sparse_embeddings,
            dense_prompt_embeddings=dense_embeddings,
            multimask_output=multimask_output,
            repeat_image=batched_mode,
            high_res_features=high_res_features,
        )

        # Upscale the masks to the original image resolution
        masks = self._transforms.postprocess_masks(
            low_res_masks, self._orig_hw[img_idx]
        )
        # Clamp logits so they stay in a numerically safe range when fed
        # back as mask_input in a later iteration.
        low_res_masks = torch.clamp(low_res_masks, -32.0, 32.0)
        if not return_logits:
            masks = masks > self.mask_threshold

        return masks, iou_predictions, low_res_masks

    def get_image_embedding(self) -> torch.Tensor:
        """
        Returns the image embeddings for the currently set image, with
        shape 1xCxHxW, where C is the embedding dimension and (H,W) are
        the embedding spatial dimension of SAM (typically C=256, H=W=64).
        """
        if not self._is_image_set:
            raise RuntimeError(
                "An image must be set with .set_image(...) to generate an embedding."
            )
        assert (
            self._features is not None
        ), "Features must exist if an image has been set."
        return self._features["image_embed"]

    @property
    def device(self) -> torch.device:
        # Delegate to the wrapped model's device.
        return self.model.device

    def reset_predictor(self) -> None:
        """
        Resets the image embeddings and other state variables.
        """
        self._is_image_set = False
        self._features = None
        self._orig_hw = None
        self._is_batch = False
source_code/sam3/sam3/model/sam3_image_processor.py ADDED
@@ -0,0 +1,222 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
2
+ from typing import Dict, List
3
+
4
+ import numpy as np
5
+ import PIL
6
+ import torch
7
+
8
+ from sam3.model import box_ops
9
+
10
+ from sam3.model.data_misc import FindStage, interpolate
11
+ from torchvision.transforms import v2
12
+
13
+
14
class Sam3Processor:
    """
    Inference-time wrapper around a SAM3 model for promptable image segmentation.

    Typical usage::

        state = processor.set_image(img)
        state = processor.set_text_prompt("a dog", state)
        # results: state["masks"], state["masks_logits"], state["boxes"], state["scores"]

    All per-image / per-prompt data lives in the ``state`` dict (not on the
    processor), so one processor instance can serve multiple images.
    """

    def __init__(self, model, resolution=1008, device="cuda", confidence_threshold=0.5):
        # model: a SAM3 model exposing `backbone`, `forward_grounding`,
        #   `_get_dummy_prompt` and (optionally) `inst_interactive_predictor`.
        # resolution: images are resized to (resolution, resolution) for the backbone.
        # confidence_threshold: minimum (query prob * presence prob) to keep a detection.
        self.model = model
        self.resolution = resolution
        self.device = device
        # Mean/std of 0.5 maps pixel values to the [-1, 1] range.
        self.transform = v2.Compose(
            [
                v2.ToDtype(torch.uint8, scale=True),
                v2.Resize(size=(resolution, resolution)),
                v2.ToDtype(torch.float32, scale=True),
                v2.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
            ]
        )
        self.confidence_threshold = confidence_threshold

        # Static single-image / single-text query descriptor, reused on every
        # grounding call (no box/point inputs at the "find" stage).
        self.find_stage = FindStage(
            img_ids=torch.tensor([0], device=device, dtype=torch.long),
            text_ids=torch.tensor([0], device=device, dtype=torch.long),
            input_boxes=None,
            input_boxes_mask=None,
            input_boxes_label=None,
            input_points=None,
            input_points_mask=None,
        )

    @torch.inference_mode()
    def set_image(self, image, state=None):
        """Sets the image on which we want to do predictions.

        Args:
            image: a PIL image, or a tensor/ndarray whose last two dims are (H, W).
            state: optional dict to populate; a new one is created when None.

        Returns:
            The state dict, with "original_height"/"original_width" recorded and
            backbone features stored under "backbone_out".
        """
        if state is None:
            state = {}

        if isinstance(image, PIL.Image.Image):
            width, height = image.size
        elif isinstance(image, (torch.Tensor, np.ndarray)):
            # assumes layout (..., H, W), i.e. channel-first — TODO confirm for callers
            height, width = image.shape[-2:]
        else:
            raise ValueError("Image must be a PIL image or a tensor")

        image = v2.functional.to_image(image).to(self.device)
        image = self.transform(image).unsqueeze(0)

        state["original_height"] = height
        state["original_width"] = width
        state["backbone_out"] = self.model.backbone.forward_image(image)
        # If an interactive (SAM2-style) instance predictor is attached, pre-project
        # the two highest-resolution FPN levels once, so later interactive mask
        # decoding can reuse them without re-running these convs.
        inst_interactivity_en = self.model.inst_interactive_predictor is not None
        if inst_interactivity_en and "sam2_backbone_out" in state["backbone_out"]:
            sam2_backbone_out = state["backbone_out"]["sam2_backbone_out"]
            sam2_backbone_out["backbone_fpn"][0] = (
                self.model.inst_interactive_predictor.model.sam_mask_decoder.conv_s0(
                    sam2_backbone_out["backbone_fpn"][0]
                )
            )
            sam2_backbone_out["backbone_fpn"][1] = (
                self.model.inst_interactive_predictor.model.sam_mask_decoder.conv_s1(
                    sam2_backbone_out["backbone_fpn"][1]
                )
            )
        return state

    @torch.inference_mode()
    def set_image_batch(self, images: List[np.ndarray], state=None):
        """Sets the image batch on which we want to do predictions.

        Args:
            images: a non-empty list of PIL images (despite the ndarray type hint,
                only PIL images are accepted by the assertion below).
            state: optional dict to populate; a new one is created when None.

        Returns:
            The state dict with "original_heights"/"original_widths" lists and
            batched backbone features under "backbone_out".
        """
        if state is None:
            state = {}

        if not isinstance(images, list):
            raise ValueError("Images must be a list of PIL images or tensors")
        assert len(images) > 0, "Images list must not be empty"
        assert isinstance(
            images[0], PIL.Image.Image
        ), "Images must be a list of PIL images"

        state["original_heights"] = [image.height for image in images]
        state["original_widths"] = [image.width for image in images]

        images = [
            self.transform(v2.functional.to_image(image).to(self.device))
            for image in images
        ]
        images = torch.stack(images, dim=0)
        state["backbone_out"] = self.model.backbone.forward_image(images)
        # Same FPN pre-projection as in `set_image` (see comment there).
        inst_interactivity_en = self.model.inst_interactive_predictor is not None
        if inst_interactivity_en and "sam2_backbone_out" in state["backbone_out"]:
            sam2_backbone_out = state["backbone_out"]["sam2_backbone_out"]
            sam2_backbone_out["backbone_fpn"][0] = (
                self.model.inst_interactive_predictor.model.sam_mask_decoder.conv_s0(
                    sam2_backbone_out["backbone_fpn"][0]
                )
            )
            sam2_backbone_out["backbone_fpn"][1] = (
                self.model.inst_interactive_predictor.model.sam_mask_decoder.conv_s1(
                    sam2_backbone_out["backbone_fpn"][1]
                )
            )
        return state

    @torch.inference_mode()
    def set_text_prompt(self, prompt: str, state: Dict):
        """Sets the text prompt and run the inference.

        Overwrites any previous text prompt in `state` and returns the state
        updated with detection results (see `_forward_grounding`).
        """

        if "backbone_out" not in state:
            raise ValueError("You must call set_image before set_text_prompt")

        text_outputs = self.model.backbone.forward_text([prompt], device=self.device)
        # will erase the previous text prompt if any
        state["backbone_out"].update(text_outputs)
        if "geometric_prompt" not in state:
            state["geometric_prompt"] = self.model._get_dummy_prompt()

        return self._forward_grounding(state)

    @torch.inference_mode()
    def add_geometric_prompt(self, box: List, label: bool, state: Dict):
        """Adds a box prompt and run the inference.
        The image needs to be set, but not necessarily the text prompt.
        The box is assumed to be in [center_x, center_y, width, height] format and normalized in [0, 1] range.
        The label is True for a positive box, False for a negative box.
        """
        if "backbone_out" not in state:
            raise ValueError("You must call set_image before set_text_prompt")

        if "language_features" not in state["backbone_out"]:
            # Looks like we don't have a text prompt yet. This is allowed, but we need to set the text prompt to "visual" for the model to rely only on the geometric prompt
            dummy_text_outputs = self.model.backbone.forward_text(
                ["visual"], device=self.device
            )
            state["backbone_out"].update(dummy_text_outputs)

        if "geometric_prompt" not in state:
            state["geometric_prompt"] = self.model._get_dummy_prompt()

        # adding a batch and sequence dimension
        boxes = torch.tensor(box, device=self.device, dtype=torch.float32).view(1, 1, 4)
        labels = torch.tensor([label], device=self.device, dtype=torch.bool).view(1, 1)
        state["geometric_prompt"].append_boxes(boxes, labels)

        return self._forward_grounding(state)

    def reset_all_prompts(self, state: Dict):
        """Removes all the prompts and results"""
        if "backbone_out" in state:
            # Drop the cached text-encoder outputs; image features are kept.
            backbone_keys_to_del = [
                "language_features",
                "language_mask",
                "language_embeds",
            ]
            for key in backbone_keys_to_del:
                if key in state["backbone_out"]:
                    del state["backbone_out"][key]

        # Drop geometric prompts and any previously computed results.
        keys_to_del = ["geometric_prompt", "boxes", "masks", "masks_logits", "scores"]
        for key in keys_to_del:
            if key in state:
                del state[key]

    @torch.inference_mode()
    def set_confidence_threshold(self, threshold: float, state=None):
        """Sets the confidence threshold for the masks"""
        self.confidence_threshold = threshold
        if state is not None and "boxes" in state:
            # we need to filter the boxes again
            # In principle we could do this more efficiently since we would only need
            # to rerun the heads. But this is simpler and not too inefficient
            return self._forward_grounding(state)
        return state

    @torch.inference_mode()
    def _forward_grounding(self, state: Dict):
        """Run the grounding forward pass and write thresholded results into `state`.

        Populates state["masks_logits"] (sigmoid scores at original resolution),
        state["masks"] (booleans at 0.5), state["boxes"] (xyxy, pixel coords) and
        state["scores"].
        """
        outputs = self.model.forward_grounding(
            backbone_out=state["backbone_out"],
            find_input=self.find_stage,
            geometric_prompt=state["geometric_prompt"],
            find_target=None,
        )

        out_bbox = outputs["pred_boxes"]
        out_logits = outputs["pred_logits"]
        out_masks = outputs["pred_masks"]
        out_probs = out_logits.sigmoid()
        # Scale per-query probabilities by the global "concept is present" score.
        presence_score = outputs["presence_logit_dec"].sigmoid().unsqueeze(1)
        out_probs = (out_probs * presence_score).squeeze(-1)

        keep = out_probs > self.confidence_threshold
        out_probs = out_probs[keep]
        out_masks = out_masks[keep]
        out_bbox = out_bbox[keep]

        # convert to [x0, y0, x1, y1] format
        boxes = box_ops.box_cxcywh_to_xyxy(out_bbox)

        # NOTE(review): this reads the single-image keys set by `set_image`;
        # `set_image_batch` stores "original_heights"/"original_widths" instead,
        # so prompting after a batched set would KeyError here — confirm intended usage.
        img_h = state["original_height"]
        img_w = state["original_width"]
        scale_fct = torch.tensor([img_w, img_h, img_w, img_h]).to(self.device)
        boxes = boxes * scale_fct[None, :]

        out_masks = interpolate(
            out_masks.unsqueeze(1),
            (img_h, img_w),
            mode="bilinear",
            align_corners=False,
        ).sigmoid()

        state["masks_logits"] = out_masks
        state["masks"] = out_masks > 0.5
        state["boxes"] = boxes
        state["scores"] = out_probs
        return state
source_code/sam3/sam3/model/sam3_tracker_utils.py ADDED
@@ -0,0 +1,427 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
2
+
3
+ import numpy as np
4
+ import torch
5
+ import torch.nn.functional as F
6
+ from numpy.typing import NDArray
7
+
8
+ from sam3.model.edt import edt_triton
9
+
10
+
11
def sample_box_points(
    masks: torch.Tensor,
    noise: float = 0.1,  # SAM default
    noise_bound: int = 20,  # SAM default
    top_left_label: int = 2,
    bottom_right_label: int = 3,
) -> tuple[NDArray, NDArray]:
    """
    Sample a noised version of the top left and bottom right corners of a given `bbox`

    Inputs:
    - masks: [B, 1, H, W] tensor
    - noise: noise as a fraction of box width and height, dtype=float
    - noise_bound: maximum amount of noise (in pure pixels), dtype=int

    Returns:
    - box_coords: [B, num_pt, 2], contains (x, y) coordinates of top left and bottom right box corners, dtype=torch.float
    - box_labels: [B, num_pt], label 2 is reserved for top left and 3 for bottom right corners, dtype=torch.int32
    """
    device = masks.device
    # Tight per-mask boxes: [B, 1, 4] in (x0, y0, x1, y1) order.
    box_coords = mask_to_box(masks)
    B, _, H, W = masks.shape
    # Flat label pattern [tl, br, tl, br, ...]; reshaped to [B, 2] at the end.
    box_labels = torch.tensor(
        [top_left_label, bottom_right_label], dtype=torch.int, device=device
    ).repeat(B)
    if noise > 0.0:
        if not isinstance(noise_bound, torch.Tensor):
            noise_bound = torch.tensor(noise_bound, device=device)
        bbox_w = box_coords[..., 2] - box_coords[..., 0]
        bbox_h = box_coords[..., 3] - box_coords[..., 1]
        # Noise magnitude per box: a fraction of the box size, capped at
        # `noise_bound` pixels.
        max_dx = torch.min(bbox_w * noise, noise_bound)
        max_dy = torch.min(bbox_h * noise, noise_bound)
        # Uniform noise in [-1, 1], scaled per-coordinate (x offsets by max_dx,
        # y offsets by max_dy, for both corners).
        box_noise = 2 * torch.rand(B, 1, 4, device=device) - 1
        box_noise = box_noise * torch.stack((max_dx, max_dy, max_dx, max_dy), dim=-1)

        box_coords = box_coords + box_noise
        img_bounds = (
            torch.tensor([W, H, W, H], device=device) - 1
        )  # uncentered pixel coords
        box_coords.clamp_(torch.zeros_like(img_bounds), img_bounds)  # In place clamping

    box_coords = box_coords.reshape(-1, 2, 2)  # always 2 points
    box_labels = box_labels.reshape(-1, 2)
    return box_coords, box_labels
55
+
56
+
57
def mask_to_box(masks: torch.Tensor):
    """
    Compute the tight bounding box of each binary mask.

    Inputs:
    - masks: [B, 1, H, W] bool tensor

    Returns:
    - box_coords: [B, 1, 4] int32 tensor of (x_min, y_min, x_max, y_max)
      corner coordinates; empty masks yield an all-zero box.
    """
    B, _, h, w = masks.shape
    device = masks.device
    # Coordinate grids broadcast over the batch, matching the mask layout.
    col_idx = torch.arange(w, device=device, dtype=torch.int32)
    row_idx = torch.arange(h, device=device, dtype=torch.int32)
    gx, gy = torch.meshgrid(col_idx, row_idx, indexing="xy")
    gx = gx[None, None, ...].expand(B, 1, h, w)
    gy = gy[None, None, ...].expand(B, 1, h, w)
    # Replace off-mask pixels by out-of-range sentinels so min/max ignore them.
    x_min = torch.min(torch.where(masks, gx, w).flatten(-2), dim=-1).values
    x_max = torch.max(torch.where(masks, gx, -1).flatten(-2), dim=-1).values
    y_min = torch.min(torch.where(masks, gy, h).flatten(-2), dim=-1).values
    y_max = torch.max(torch.where(masks, gy, -1).flatten(-2), dim=-1).values
    boxes = torch.stack((x_min, y_min, x_max, y_max), dim=-1)
    # Empty masks would produce degenerate sentinel boxes; zero them out instead.
    nonempty = masks.sum(dim=(-1, -2))[..., None] > 0
    return torch.where(nonempty, boxes, torch.zeros_like(boxes))
84
+
85
+
86
def sample_random_points_from_errors(gt_masks, pred_masks, num_pt=1):
    """
    Independently sample `num_pt` correction clicks from the prediction's
    error regions.

    A click falling in a false-positive region gets a negative label (0), one
    falling in a false-negative region gets a positive label (1). When the
    prediction already matches the ground truth exactly, a negative click is
    sampled from the background instead.

    Inputs:
    - gt_masks: [B, 1, H_im, W_im] bool tensor
    - pred_masks: [B, 1, H_im, W_im] bool tensor, or None (treated as empty)
    - num_pt: number of points to sample per batch element

    Outputs:
    - points: [B, num_pt, 2] float tensor of (x, y) coordinates
    - labels: [B, num_pt] int32 tensor (1 = positive click, 0 = negative click)
    """
    if pred_masks is None:
        # No prediction yet: behave as if everything was predicted background.
        pred_masks = torch.zeros_like(gt_masks)
    assert gt_masks.dtype == torch.bool and gt_masks.size(1) == 1
    assert pred_masks.dtype == torch.bool and pred_masks.shape == gt_masks.shape
    assert num_pt >= 0

    B, _, H_im, W_im = gt_masks.shape
    device = gt_masks.device

    # Clicks in the FP region should be negative, clicks in the FN region positive.
    fp_region = ~gt_masks & pred_masks
    fn_region = gt_masks & ~pred_masks
    # Per-mask flag: prediction matches ground truth exactly (no FP and no FN).
    exact_match = torch.all((gt_masks == pred_masks).flatten(2), dim=2)
    exact_match = exact_match[..., None, None]

    # Random scores over two channels (0 = FP/background, 1 = FN); zeroing a
    # channel outside its region means the global argmax lands inside a valid
    # region, and the channel parity of the winning index encodes the label.
    rand_scores = torch.rand(B, num_pt, H_im, W_im, 2, device=device)
    rand_scores[..., 0] *= fp_region | (exact_match & ~gt_masks)
    rand_scores[..., 1] *= fn_region
    winner = rand_scores.flatten(2).argmax(dim=2)
    labels = (winner % 2).to(torch.int32)
    pixel = winner // 2
    xs = pixel % W_im
    ys = pixel // W_im
    points = torch.stack([xs, ys], dim=2).to(torch.float)
    return points, labels
134
+
135
+
136
def sample_one_point_from_error_center(gt_masks, pred_masks, padding=True):
    """
    Sample 1 random point (along with its label) from the center of each error region,
    that is, the point with the largest distance to the boundary of each error region.
    This is the RITM sampling method from https://github.com/saic-vul/ritm_interactive_segmentation/blob/master/isegm/inference/clicker.py

    Inputs:
    - gt_masks: [B, 1, H_im, W_im] masks, dtype=torch.bool
    - pred_masks: [B, 1, H_im, W_im] masks, dtype=torch.bool or None
    - padding: if True, pad with boundary of 1 px for distance transform

    Outputs:
    - points: [B, 1, 2], dtype=torch.float, contains (x, y) coordinates of each sampled point
    - labels: [B, 1], dtype=torch.int32, where 1 means positive clicks and 0 means negative clicks
    """
    if pred_masks is None:
        # No prediction yet: treat it as an all-background prediction.
        pred_masks = torch.zeros_like(gt_masks)
    assert gt_masks.dtype == torch.bool and gt_masks.size(1) == 1
    assert pred_masks.dtype == torch.bool and pred_masks.shape == gt_masks.shape

    B, _, H, W = gt_masks.shape

    # false positive region, a new point sampled in this region should have
    # negative label to correct the FP error
    fp_masks = (~gt_masks & pred_masks).squeeze(1)
    # false negative region, a new point sampled in this region should have
    # positive label to correct the FN error
    fn_masks = (gt_masks & ~pred_masks).squeeze(1)

    if padding:
        # Zero-pad by 1 px on each side so the distance transform treats the
        # image border as a region boundary (matches the slow cv2 variant).
        padded_fp_masks = torch.zeros(
            B, H + 2, W + 2, dtype=fp_masks.dtype, device=fp_masks.device
        )
        padded_fp_masks[:, 1 : H + 1, 1 : W + 1] = fp_masks
        padded_fn_masks = torch.zeros(
            B, H + 2, W + 2, dtype=fp_masks.dtype, device=fp_masks.device
        )
        padded_fn_masks[:, 1 : H + 1, 1 : W + 1] = fn_masks
    else:
        padded_fp_masks = fp_masks
        padded_fn_masks = fn_masks

    # Euclidean distance transform (Triton implementation): distance of each
    # in-region pixel to the region boundary.
    fn_mask_dt = edt_triton(padded_fn_masks)
    fp_mask_dt = edt_triton(padded_fp_masks)
    if padding:
        # Strip the 1 px padding so indices map back to image coordinates.
        fn_mask_dt = fn_mask_dt[:, 1:-1, 1:-1]
        fp_mask_dt = fp_mask_dt[:, 1:-1, 1:-1]

    # Per batch element, pick the deepest point of whichever error region
    # (FN vs FP) is "deeper"; FN wins ties toward positive clicks via `>`.
    fn_max, fn_argmax = fn_mask_dt.reshape(B, -1).max(dim=-1)
    fp_max, fp_argmax = fp_mask_dt.reshape(B, -1).max(dim=-1)
    is_positive = fn_max > fp_max
    chosen = torch.where(is_positive, fn_argmax, fp_argmax)
    points_x = chosen % W
    points_y = chosen // W

    # NOTE(review): `.long()` returns int64 labels although the docstring says
    # int32 (the slow cv2 variant returns int32) — confirm callers don't rely
    # on the exact dtype.
    labels = is_positive.long()
    points = torch.stack([points_x, points_y], -1)
    return points.unsqueeze(1), labels.unsqueeze(1)
194
+
195
+
196
def sample_one_point_from_error_center_slow(gt_masks, pred_masks, padding=True):
    """
    Sample 1 random point (along with its label) from the center of each error region,
    that is, the point with the largest distance to the boundary of each error region.
    This is the RITM sampling method from https://github.com/saic-vul/ritm_interactive_segmentation/blob/master/isegm/inference/clicker.py

    Reference (CPU/OpenCV) implementation of `sample_one_point_from_error_center`;
    it loops over the batch and round-trips tensors through NumPy.

    Inputs:
    - gt_masks: [B, 1, H_im, W_im] masks, dtype=torch.bool
    - pred_masks: [B, 1, H_im, W_im] masks, dtype=torch.bool or None
    - padding: if True, pad with boundary of 1 px for distance transform

    Outputs:
    - points: [B, 1, 2], dtype=torch.float, contains (x, y) coordinates of each sampled point
    - labels: [B, 1], dtype=torch.int32, where 1 means positive clicks and 0 means negative clicks
    """
    import cv2  # delay OpenCV import to avoid unnecessary dependency

    if pred_masks is None:
        # No prediction yet: treat it as an all-background prediction.
        pred_masks = torch.zeros_like(gt_masks)
    assert gt_masks.dtype == torch.bool and gt_masks.size(1) == 1
    assert pred_masks.dtype == torch.bool and pred_masks.shape == gt_masks.shape

    B, _, _, W_im = gt_masks.shape
    device = gt_masks.device

    # false positive region, a new point sampled in this region should have
    # negative label to correct the FP error
    fp_masks = ~gt_masks & pred_masks
    # false negative region, a new point sampled in this region should have
    # positive label to correct the FN error
    fn_masks = gt_masks & ~pred_masks

    fp_masks = fp_masks.cpu().numpy()
    fn_masks = fn_masks.cpu().numpy()
    points = torch.zeros(B, 1, 2, dtype=torch.float)
    labels = torch.ones(B, 1, dtype=torch.int32)
    for b in range(B):
        fn_mask = fn_masks[b, 0]
        fp_mask = fp_masks[b, 0]
        if padding:
            # 1 px zero border so the image edge counts as a region boundary.
            fn_mask = np.pad(fn_mask, ((1, 1), (1, 1)), "constant")
            fp_mask = np.pad(fp_mask, ((1, 1), (1, 1)), "constant")
        # compute the distance of each point in FN/FP region to its boundary
        fn_mask_dt = cv2.distanceTransform(fn_mask.astype(np.uint8), cv2.DIST_L2, 0)
        fp_mask_dt = cv2.distanceTransform(fp_mask.astype(np.uint8), cv2.DIST_L2, 0)
        if padding:
            fn_mask_dt = fn_mask_dt[1:-1, 1:-1]
            fp_mask_dt = fp_mask_dt[1:-1, 1:-1]

        # take the point in FN/FP region with the largest distance to its boundary
        fn_mask_dt_flat = fn_mask_dt.reshape(-1)
        fp_mask_dt_flat = fp_mask_dt.reshape(-1)
        fn_argmax = np.argmax(fn_mask_dt_flat)
        fp_argmax = np.argmax(fp_mask_dt_flat)
        # FP wins ties (strict `>`), matching the fast variant's tie behavior.
        is_positive = fn_mask_dt_flat[fn_argmax] > fp_mask_dt_flat[fp_argmax]
        pt_idx = fn_argmax if is_positive else fp_argmax
        points[b, 0, 0] = pt_idx % W_im  # x
        points[b, 0, 1] = pt_idx // W_im  # y
        labels[b, 0] = int(is_positive)

    points = points.to(device)
    labels = labels.to(device)
    return points, labels
259
+
260
+
261
def get_next_point(gt_masks, pred_masks, method):
    """
    Dispatch to a correction-click sampling strategy by name.

    `method` is "uniform" (random point per error region, see
    `sample_random_points_from_errors`) or "center" (deepest point of the
    largest error region, see `sample_one_point_from_error_center`).
    Raises ValueError for any other name.
    """
    if method == "uniform":
        sampler = sample_random_points_from_errors
    elif method == "center":
        sampler = sample_one_point_from_error_center
    else:
        raise ValueError(f"unknown sampling method {method}")
    return sampler(gt_masks, pred_masks)
268
+
269
+
270
+ def select_closest_cond_frames(
271
+ frame_idx, cond_frame_outputs, max_cond_frame_num, keep_first_cond_frame=False
272
+ ):
273
+ """
274
+ Select up to `max_cond_frame_num` conditioning frames from `cond_frame_outputs`
275
+ that are temporally closest to the current frame at `frame_idx`. Here, we take
276
+ - a) the closest conditioning frame before `frame_idx` (if any);
277
+ - b) the closest conditioning frame after `frame_idx` (if any);
278
+ - c) any other temporally closest conditioning frames until reaching a total
279
+ of `max_cond_frame_num` conditioning frames.
280
+
281
+ Outputs:
282
+ - selected_outputs: selected items (keys & values) from `cond_frame_outputs`.
283
+ - unselected_outputs: items (keys & values) not selected in `cond_frame_outputs`.
284
+ """
285
+ if max_cond_frame_num == -1 or len(cond_frame_outputs) <= max_cond_frame_num:
286
+ selected_outputs = cond_frame_outputs
287
+ unselected_outputs = {}
288
+ else:
289
+ assert max_cond_frame_num >= 2, "we should allow using 2+ conditioning frames"
290
+ selected_outputs = {}
291
+ if keep_first_cond_frame:
292
+ idx_first = min(
293
+ (t for t in cond_frame_outputs if t < frame_idx), default=None
294
+ )
295
+ if idx_first is None:
296
+ # Maybe we are tracking in reverse
297
+ idx_first = max(
298
+ (t for t in cond_frame_outputs if t > frame_idx), default=None
299
+ )
300
+ if idx_first is not None:
301
+ selected_outputs[idx_first] = cond_frame_outputs[idx_first]
302
+ # the closest conditioning frame before `frame_idx` (if any)
303
+ idx_before = max((t for t in cond_frame_outputs if t < frame_idx), default=None)
304
+ if idx_before is not None:
305
+ selected_outputs[idx_before] = cond_frame_outputs[idx_before]
306
+
307
+ # the closest conditioning frame after `frame_idx` (if any)
308
+ idx_after = min((t for t in cond_frame_outputs if t >= frame_idx), default=None)
309
+ if idx_after is not None:
310
+ selected_outputs[idx_after] = cond_frame_outputs[idx_after]
311
+
312
+ # add other temporally closest conditioning frames until reaching a total
313
+ # of `max_cond_frame_num` conditioning frames.
314
+ num_remain = max_cond_frame_num - len(selected_outputs)
315
+ inds_remain = sorted(
316
+ (t for t in cond_frame_outputs if t not in selected_outputs),
317
+ key=lambda x: abs(x - frame_idx),
318
+ )[:num_remain]
319
+ selected_outputs.update((t, cond_frame_outputs[t]) for t in inds_remain)
320
+ unselected_outputs = {
321
+ t: v for t, v in cond_frame_outputs.items() if t not in selected_outputs
322
+ }
323
+
324
+ return selected_outputs, unselected_outputs
325
+
326
+
327
def get_1d_sine_pe(pos_inds, dim, temperature=10000):
    """
    Sinusoidal 1D positional embedding, as in the original Transformer paper
    ("Attention Is All You Need").

    Returns a tensor of shape `pos_inds.shape + (dim,)` whose first `dim//2`
    channels are sines and last `dim//2` channels are cosines of the scaled
    positions.
    """
    half_dim = dim // 2
    freq_idx = torch.arange(half_dim, dtype=torch.float32, device=pos_inds.device)
    # Geometric frequency ladder, paired (floor-div by 2) so sin/cos share rates.
    inv_freq = temperature ** (2 * (freq_idx // 2) / half_dim)

    angles = pos_inds.unsqueeze(-1) / inv_freq
    return torch.cat([angles.sin(), angles.cos()], dim=-1)
338
+
339
+
340
def get_best_gt_match_from_multimasks(pred_multimasks, gt_masks, pred_scores=None):
    """
    Select, per batch element, the predicted mask channel that best matches the
    GT mask by IoU, returning it with a singleton channel dim [B, 1, H, W].

    When `pred_scores` is given and *every* IoU in the batch is zero (e.g. the
    GT masks are empty), the model's own scores are used to break the tie.
    """
    assert pred_multimasks.ndim == 4 and gt_masks.ndim == 4
    if pred_multimasks.size(1) == 1:
        # Single-channel prediction: nothing to choose between.
        return pred_multimasks

    binary_preds = pred_multimasks > 0
    inter = torch.sum(binary_preds & gt_masks, dim=(2, 3)).float()
    union = torch.sum(binary_preds | gt_masks, dim=(2, 3)).float()
    ious = inter / torch.clamp(union, min=1.0)

    if pred_scores is not None:
        # Global (not per-row) switch: only fall back to pred_scores when no
        # channel anywhere in the batch overlaps the GT at all.
        any_overlap = torch.any(ious > 0).expand_as(ious)
        ranking = torch.where(any_overlap, ious, pred_scores)
    else:
        ranking = ious

    # Gather the top-ranked channel per batch element.
    best_idx = torch.argmax(ranking, dim=-1)
    rows = torch.arange(ranking.size(0), device=ranking.device)
    return pred_multimasks[rows, best_idx].unsqueeze(1)
367
+
368
+
369
def fill_holes_in_mask_scores(mask, max_area, fill_holes=True, remove_sprinkles=True):
    """
    A post processor to fill small holes in mask scores with area under `max_area`.
    Holes are those small connected components in either background or foreground.

    Args:
        mask: [B, 1, H, W] mask score tensor (positive = foreground).
        max_area: maximum component area (in pixels) to be filled/removed;
            values <= 0 disable the post-processing entirely.
        fill_holes: fill small background components with score 0.1.
        remove_sprinkles: suppress small foreground components with score -0.1.

    Note that it relies on the "cc_torch" package to find connected components fast. You can
    install it via the following command (`TORCH_CUDA_ARCH_LIST=8.0` is for A100 GPUs):
    ```
    pip uninstall -y cc_torch; TORCH_CUDA_ARCH_LIST=8.0 9.0 pip install git+https://github.com/ronghanghu/cc_torch
    ```
    Otherwise, it will fallback to a slightly slower triton implementation, or skimage if the tensor is on cpu
    """

    if max_area <= 0:
        return mask  # nothing to fill in this case

    if fill_holes:
        # We remove small connected components in background by changing them to foreground
        # with a small positive mask score (0.1).
        mask_bg = mask <= 0
        bg_area_thresh = max_area
        _, areas_bg = _get_connected_components_with_padding(mask_bg)
        small_components_bg = mask_bg & (areas_bg <= bg_area_thresh)
        mask = torch.where(small_components_bg, 0.1, mask)

    if remove_sprinkles:
        # We remove small connected components in foreground by changing them to background
        # with a small negative mask score (-0.1). Here we only remove connected components
        # whose areas are under both `max_area` and half of the entire mask's area. This
        # removes sprinkles while avoids filtering out tiny objects that we want to track.
        mask_fg = mask > 0
        # Per-mask threshold: min(total_fg_area // 2, max_area), computed in place.
        fg_area_thresh = torch.sum(mask_fg, dim=(2, 3), keepdim=True, dtype=torch.int32)
        fg_area_thresh.floor_divide_(2).clamp_(max=max_area)
        _, areas_fg = _get_connected_components_with_padding(mask_fg)
        small_components_fg = mask_fg & (areas_fg <= fg_area_thresh)
        mask = torch.where(small_components_fg, -0.1, mask)
    return mask
406
+
407
+
408
def _get_connected_components_with_padding(mask):
    """Get connected components from masks (possibly padding them to an even size).

    Args:
        mask: [B, 1, H, W] binary mask tensor (converted to uint8 below).

    Returns:
        (labels, counts): per-pixel component labels and component areas,
        both [B, 1, H, W], as produced by `sam3.perflib.connected_components`.
    """
    from sam3.perflib.connected_components import connected_components

    mask = mask.to(torch.uint8)
    _, _, H, W = mask.shape
    # make sure both height and width are even (to be compatible with cc_torch)
    pad_h = H % 2
    pad_w = W % 2
    if pad_h == 0 and pad_w == 0:
        labels, counts = connected_components(mask)
    else:
        # pad the mask to make its height and width even
        # padding format is (padding_left,padding_right,padding_top,padding_bottom)
        mask_pad = F.pad(mask, (0, pad_w, 0, pad_h), mode="constant", value=0)
        labels, counts = connected_components(mask_pad)
        # crop back to the original size so outputs align with the input mask
        labels = labels[:, :, :H, :W]
        counts = counts[:, :, :H, :W]

    return labels, counts
source_code/sam3/sam3/model/sam3_tracking_predictor.py ADDED
@@ -0,0 +1,1368 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
2
+
3
+ import logging
4
+ from collections import OrderedDict
5
+
6
+ import torch
7
+
8
+ from sam3.model.sam3_tracker_base import concat_points, NO_OBJ_SCORE, Sam3TrackerBase
9
+ from sam3.model.sam3_tracker_utils import fill_holes_in_mask_scores
10
+ from sam3.model.utils.sam2_utils import load_video_frames
11
+ from tqdm.auto import tqdm
12
+
13
+
14
+ class Sam3TrackerPredictor(Sam3TrackerBase):
15
+ """
16
+ The demo class that extends the `Sam3TrackerBase` to handle user interactions
17
+ and manage inference states, with support for multi-object tracking.
18
+ """
19
+
20
    def __init__(
        self,
        # Whether to clear non-conditioning memory of the surrounding frames (which may
        # contain outdated information) after adding correction clicks; note that this
        # would only apply to *single-object tracking* unless
        # `clear_non_cond_mem_for_multi_obj` is also set to True.
        clear_non_cond_mem_around_input=False,
        # Whether to also clear non-conditioning memory of the surrounding frames under
        # multi-object tracking (only effective when `clear_non_cond_mem_around_input`
        # is True).
        clear_non_cond_mem_for_multi_obj=False,
        # If fill_hole_area > 0, we fill small holes in the final masks up to this area
        # (after resizing them to the original video resolution).
        fill_hole_area=0,
        # If always_start_from_first_ann_frame is True, we always start tracking from
        # the frame where we receive the first annotation (clicks or mask) and ignore
        # the `start_frame_idx` passed to `propagate_in_video`.
        always_start_from_first_ann_frame=False,
        # The maximum number of points to be used in the prompt encoder, which reduces
        # the domain gap between training (that only has 8 points) and inference:
        # - if set to a positive integer, we only take the first
        #   `max_point_num_in_prompt_enc//2` points and the last
        #   `(max_point_num_in_prompt_enc - max_point_num_in_prompt_enc//2)` points
        #   in the prompt encoder;
        # - if set to 0 or negative, this option is turned off and we use all points.
        max_point_num_in_prompt_enc=16,
        # Whether to enforce non-overlapping constraints across objects on the final
        # output masks (see `_get_orig_video_res_output`).
        non_overlap_masks_for_output=True,
        **kwargs,
    ):
        """
        Build the interactive predictor on top of `Sam3TrackerBase`.

        All remaining keyword arguments are forwarded to the base tracker.
        """
        super().__init__(**kwargs)
        self.clear_non_cond_mem_around_input = clear_non_cond_mem_around_input
        self.clear_non_cond_mem_for_multi_obj = clear_non_cond_mem_for_multi_obj
        self.fill_hole_area = fill_hole_area
        self.always_start_from_first_ann_frame = always_start_from_first_ann_frame
        self.max_point_num_in_prompt_enc = max_point_num_in_prompt_enc
        self.non_overlap_masks_for_output = non_overlap_masks_for_output

        # NOTE(review): this enters a bf16 CUDA autocast context at construction time
        # and never exits it, so it affects *all* subsequent CUDA ops in the process
        # (per the original comment, intentionally kept open for the model's lifetime)
        # — confirm this global side effect is intended.
        self.bf16_context = torch.autocast(device_type="cuda", dtype=torch.bfloat16)
        self.bf16_context.__enter__()  # keep using for the entire model process

        # Feed previously predicted mask logits back into the SAM decoder on
        # correction clicks, and treat every frame receiving clicks/masks as a
        # conditioning frame (both read by the add_new_* methods below).
        self.iter_use_prev_mask_pred = True
        self.add_all_frames_to_correct_as_cond = True
54
+
55
+ @torch.inference_mode()
56
+ def init_state(
57
+ self,
58
+ video_height=None,
59
+ video_width=None,
60
+ num_frames=None,
61
+ video_path=None,
62
+ cached_features=None,
63
+ offload_video_to_cpu=False,
64
+ offload_state_to_cpu=False,
65
+ async_loading_frames=False,
66
+ ):
67
+ """Initialize a inference state."""
68
+ inference_state = {}
69
+ # whether to offload the video frames to CPU memory
70
+ # turning on this option saves the GPU memory with only a very small overhead
71
+ inference_state["offload_video_to_cpu"] = offload_video_to_cpu
72
+ # whether to offload the inference state to CPU memory
73
+ # turning on this option saves the GPU memory at the cost of a lower tracking fps
74
+ # (e.g. in a test case of 768x768 model, fps dropped from 27 to 24 when tracking one object
75
+ # and from 24 to 21 when tracking two objects)
76
+ inference_state["offload_state_to_cpu"] = offload_state_to_cpu
77
+ inference_state["device"] = self.device
78
+ if offload_state_to_cpu:
79
+ inference_state["storage_device"] = torch.device("cpu")
80
+ else:
81
+ inference_state["storage_device"] = torch.device("cuda")
82
+
83
+ if video_path is not None:
84
+ images, video_height, video_width = load_video_frames(
85
+ video_path=video_path,
86
+ image_size=self.image_size,
87
+ offload_video_to_cpu=offload_video_to_cpu,
88
+ async_loading_frames=async_loading_frames,
89
+ compute_device=inference_state["storage_device"],
90
+ )
91
+ inference_state["images"] = images
92
+ inference_state["num_frames"] = len(images)
93
+ inference_state["video_height"] = video_height
94
+ inference_state["video_width"] = video_width
95
+ else:
96
+ # the original video height and width, used for resizing final output scores
97
+ inference_state["video_height"] = video_height
98
+ inference_state["video_width"] = video_width
99
+ inference_state["num_frames"] = num_frames
100
+ # inputs on each frame
101
+ inference_state["point_inputs_per_obj"] = {}
102
+ inference_state["mask_inputs_per_obj"] = {}
103
+ # visual features on a small number of recently visited frames for quick interactions
104
+ inference_state["cached_features"] = (
105
+ {} if cached_features is None else cached_features
106
+ )
107
+ # values that don't change across frames (so we only need to hold one copy of them)
108
+ inference_state["constants"] = {}
109
+ # mapping between client-side object id and model-side object index
110
+ inference_state["obj_id_to_idx"] = OrderedDict()
111
+ inference_state["obj_idx_to_id"] = OrderedDict()
112
+ inference_state["obj_ids"] = []
113
+ # A storage to hold the model's tracking results and states on each frame
114
+ inference_state["output_dict"] = {
115
+ "cond_frame_outputs": {}, # dict containing {frame_idx: <out>}
116
+ "non_cond_frame_outputs": {}, # dict containing {frame_idx: <out>}
117
+ }
118
+ # The index of the frame that received the first annotation
119
+ inference_state["first_ann_frame_idx"] = None
120
+ # Slice (view) of each object tracking results, sharing the same memory with "output_dict"
121
+ inference_state["output_dict_per_obj"] = {}
122
+ # A temporary storage to hold new outputs when user interact with a frame
123
+ # to add clicks or mask (it's merged into "output_dict" before propagation starts)
124
+ inference_state["temp_output_dict_per_obj"] = {}
125
+ # Frames that already holds consolidated outputs from click or mask inputs
126
+ # (we directly use their consolidated outputs during tracking)
127
+ inference_state["consolidated_frame_inds"] = {
128
+ "cond_frame_outputs": set(), # set containing frame indices
129
+ "non_cond_frame_outputs": set(), # set containing frame indices
130
+ }
131
+ # metadata for each tracking frame (e.g. which direction it's tracked)
132
+ inference_state["tracking_has_started"] = False
133
+ inference_state["frames_already_tracked"] = {}
134
+ self.clear_all_points_in_video(inference_state)
135
+ return inference_state
136
+
137
+ def _obj_id_to_idx(self, inference_state, obj_id):
138
+ """Map client-side object id to model-side object index."""
139
+ obj_idx = inference_state["obj_id_to_idx"].get(obj_id, None)
140
+ if obj_idx is not None:
141
+ return obj_idx
142
+
143
+ # This is a new object id not sent to the server before. We only allow adding
144
+ # new objects *before* the tracking starts.
145
+ allow_new_object = not inference_state["tracking_has_started"]
146
+ if allow_new_object:
147
+ # get the next object slot
148
+ obj_idx = len(inference_state["obj_id_to_idx"])
149
+ inference_state["obj_id_to_idx"][obj_id] = obj_idx
150
+ inference_state["obj_idx_to_id"][obj_idx] = obj_id
151
+ inference_state["obj_ids"] = list(inference_state["obj_id_to_idx"])
152
+ # set up input and output structures for this object
153
+ inference_state["point_inputs_per_obj"][obj_idx] = {}
154
+ inference_state["mask_inputs_per_obj"][obj_idx] = {}
155
+ inference_state["output_dict_per_obj"][obj_idx] = {
156
+ "cond_frame_outputs": {}, # dict containing {frame_idx: <out>}
157
+ "non_cond_frame_outputs": {}, # dict containing {frame_idx: <out>}
158
+ }
159
+ inference_state["temp_output_dict_per_obj"][obj_idx] = {
160
+ "cond_frame_outputs": {}, # dict containing {frame_idx: <out>}
161
+ "non_cond_frame_outputs": {}, # dict containing {frame_idx: <out>}
162
+ }
163
+ return obj_idx
164
+ else:
165
+ raise RuntimeError(
166
+ f"Cannot add new object id {obj_id} after tracking starts. "
167
+ f"All existing object ids: {inference_state['obj_ids']}."
168
+ )
169
+
170
+ def _obj_idx_to_id(self, inference_state, obj_idx):
171
+ """Map model-side object index to client-side object id."""
172
+ return inference_state["obj_idx_to_id"][obj_idx]
173
+
174
+ def _get_obj_num(self, inference_state):
175
+ """Get the total number of unique object ids received so far in this session."""
176
+ return len(inference_state["obj_idx_to_id"])
177
+
178
+ @torch.inference_mode()
179
+ def add_new_points_or_box(
180
+ self,
181
+ inference_state,
182
+ frame_idx,
183
+ obj_id,
184
+ points=None,
185
+ labels=None,
186
+ clear_old_points=True,
187
+ rel_coordinates=True,
188
+ use_prev_mem_frame=False,
189
+ normalize_coords=True,
190
+ box=None,
191
+ ):
192
+ """Add new points to a frame."""
193
+ obj_idx = self._obj_id_to_idx(inference_state, obj_id)
194
+ point_inputs_per_frame = inference_state["point_inputs_per_obj"][obj_idx]
195
+ mask_inputs_per_frame = inference_state["mask_inputs_per_obj"][obj_idx]
196
+
197
+ if (points is not None) != (labels is not None):
198
+ raise ValueError("points and labels must be provided together")
199
+ if points is None and box is None:
200
+ raise ValueError("at least one of points or box must be provided as input")
201
+
202
+ if points is None:
203
+ points = torch.zeros(0, 2, dtype=torch.float32)
204
+ elif not isinstance(points, torch.Tensor):
205
+ points = torch.tensor(points, dtype=torch.float32)
206
+ if labels is None:
207
+ labels = torch.zeros(0, dtype=torch.int32)
208
+ elif not isinstance(labels, torch.Tensor):
209
+ labels = torch.tensor(labels, dtype=torch.int32)
210
+ if points.dim() == 2:
211
+ points = points.unsqueeze(0) # add batch dimension
212
+ if labels.dim() == 1:
213
+ labels = labels.unsqueeze(0) # add batch dimension
214
+
215
+ if rel_coordinates:
216
+ # convert the points from relative coordinates to absolute coordinates
217
+ if points is not None:
218
+ points = points * self.image_size
219
+ if box is not None:
220
+ box = box * self.image_size
221
+
222
+ # If `box` is provided, we add it as the first two points with labels 2 and 3
223
+ # along with the user-provided points (consistent with how SAM 2 is trained).
224
+ if box is not None:
225
+ if not clear_old_points:
226
+ raise ValueError(
227
+ "cannot add box without clearing old points, since "
228
+ "box prompt must be provided before any point prompt "
229
+ "(please use clear_old_points=True instead)"
230
+ )
231
+ if not isinstance(box, torch.Tensor):
232
+ box = torch.tensor(box, dtype=torch.float32, device=points.device)
233
+ box_coords = box.reshape(1, 2, 2)
234
+ box_labels = torch.tensor([2, 3], dtype=torch.int32, device=labels.device)
235
+ box_labels = box_labels.reshape(1, 2)
236
+ points = torch.cat([box_coords, points], dim=1)
237
+ labels = torch.cat([box_labels, labels], dim=1)
238
+
239
+ points = points.to(inference_state["device"])
240
+ labels = labels.to(inference_state["device"])
241
+
242
+ if not clear_old_points:
243
+ point_inputs = point_inputs_per_frame.get(frame_idx, None)
244
+ else:
245
+ point_inputs = None
246
+ point_inputs = concat_points(point_inputs, points, labels)
247
+
248
+ point_inputs_per_frame[frame_idx] = point_inputs
249
+ mask_inputs_per_frame.pop(frame_idx, None)
250
+ # If this frame hasn't been tracked before, we treat it as an initial conditioning
251
+ # frame, meaning that the inputs points are to generate segments on this frame without
252
+ # using any memory from other frames, like in SAM. Otherwise (if it has been tracked),
253
+ # the input points will be used to correct the already tracked masks.
254
+ is_init_cond_frame = frame_idx not in inference_state["frames_already_tracked"]
255
+ # whether to track in reverse time order
256
+ if is_init_cond_frame:
257
+ reverse = False
258
+ else:
259
+ reverse = inference_state["frames_already_tracked"][frame_idx]["reverse"]
260
+ obj_output_dict = inference_state["output_dict_per_obj"][obj_idx]
261
+ obj_temp_output_dict = inference_state["temp_output_dict_per_obj"][obj_idx]
262
+ # Add a frame to conditioning output if it's an initial conditioning frame or
263
+ # if the model sees all frames receiving clicks/mask as conditioning frames.
264
+ is_cond = is_init_cond_frame or self.add_all_frames_to_correct_as_cond
265
+ storage_key = "cond_frame_outputs" if is_cond else "non_cond_frame_outputs"
266
+
267
+ # Limit to a maximum number of input points to the prompt encoder (to reduce domain gap)
268
+ num_points = point_inputs["point_coords"].size(1)
269
+ if num_points > self.max_point_num_in_prompt_enc > 0:
270
+ num_first = self.max_point_num_in_prompt_enc // 2
271
+ num_last = self.max_point_num_in_prompt_enc - num_first
272
+ point_inputs["point_coords"] = torch.cat(
273
+ [
274
+ point_inputs["point_coords"][:, :num_first],
275
+ point_inputs["point_coords"][:, -num_last:],
276
+ ],
277
+ dim=1,
278
+ )
279
+ point_inputs["point_labels"] = torch.cat(
280
+ [
281
+ point_inputs["point_labels"][:, :num_first],
282
+ point_inputs["point_labels"][:, -num_last:],
283
+ ],
284
+ dim=1,
285
+ )
286
+ logging.warning(
287
+ f"Too many points ({num_points}) are provided on frame {frame_idx}. Only "
288
+ f"the first {num_first} points and the last {num_last} points will be used."
289
+ )
290
+ # Get any previously predicted mask logits on this object and feed it along with
291
+ # the new clicks into the SAM mask decoder when `self.iter_use_prev_mask_pred=True`.
292
+ prev_sam_mask_logits = None
293
+ if self.iter_use_prev_mask_pred:
294
+ # lookup temporary output dict first, which contains the most recent output
295
+ # (if not found, then lookup conditioning and non-conditioning frame output)
296
+ prev_out = obj_temp_output_dict[storage_key].get(frame_idx)
297
+ if prev_out is None:
298
+ prev_out = obj_output_dict["cond_frame_outputs"].get(frame_idx)
299
+ if prev_out is None:
300
+ prev_out = obj_output_dict["non_cond_frame_outputs"].get(frame_idx)
301
+
302
+ if prev_out is not None and prev_out["pred_masks"] is not None:
303
+ prev_sam_mask_logits = prev_out["pred_masks"].cuda(non_blocking=True)
304
+ # Clamp the scale of prev_sam_mask_logits to avoid rare numerical issues.
305
+ prev_sam_mask_logits = torch.clamp(prev_sam_mask_logits, -32.0, 32.0)
306
+ current_out, _ = self._run_single_frame_inference(
307
+ inference_state=inference_state,
308
+ output_dict=obj_output_dict, # run on the slice of a single object
309
+ frame_idx=frame_idx,
310
+ batch_size=1, # run on the slice of a single object
311
+ is_init_cond_frame=is_init_cond_frame,
312
+ point_inputs=point_inputs,
313
+ mask_inputs=None,
314
+ reverse=reverse,
315
+ # Skip the memory encoder when adding clicks or mask. We execute the memory encoder
316
+ # at the beginning of `propagate_in_video` (after user finalize their clicks). This
317
+ # allows us to enforce non-overlapping constraints on all objects before encoding
318
+ # them into memory.
319
+ run_mem_encoder=False,
320
+ prev_sam_mask_logits=prev_sam_mask_logits,
321
+ use_prev_mem_frame=use_prev_mem_frame,
322
+ )
323
+ # Add the output to the output dict (to be used as future memory)
324
+ obj_temp_output_dict[storage_key][frame_idx] = current_out
325
+
326
+ # Resize the output mask to the original video resolution
327
+ obj_ids = inference_state["obj_ids"]
328
+ consolidated_out = self._consolidate_temp_output_across_obj(
329
+ inference_state,
330
+ frame_idx,
331
+ is_cond=is_cond,
332
+ run_mem_encoder=False,
333
+ consolidate_at_video_res=True,
334
+ )
335
+ _, video_res_masks = self._get_orig_video_res_output(
336
+ inference_state, consolidated_out["pred_masks_video_res"]
337
+ )
338
+ low_res_masks = None # not needed by the demo
339
+ return frame_idx, obj_ids, low_res_masks, video_res_masks
340
+
341
    @torch.inference_mode()
    def add_new_mask(
        self,
        inference_state,
        frame_idx,
        obj_id,
        mask,
        add_mask_to_memory=False,
    ):
        """
        Add a binary mask prompt for an object on a frame.

        The mask is resized to the model's input mask size for inference, and a
        video-resolution copy of it is used *directly* as this object's output on
        the frame (so the displayed mask doesn't change after each brushing).
        Overlapping regions are removed from other objects' temporary outputs on
        the same frame.

        Args:
            inference_state: state dict created by `init_state`.
            frame_idx: index of the frame receiving the mask.
            obj_id: client-side object id (registered on first use).
            mask: 2-D tensor (H, W); values are treated as soft in [0, 1] and
                thresholded at 0.5 after resizing — TODO confirm expected range.
            add_mask_to_memory: NOTE(review): currently unused in this method.

        Returns:
            (frame_idx, obj_ids, low_res_masks, video_res_masks) where
            `low_res_masks` is always None (not needed by the demo).
        """
        obj_idx = self._obj_id_to_idx(inference_state, obj_id)
        point_inputs_per_frame = inference_state["point_inputs_per_obj"][obj_idx]
        mask_inputs_per_frame = inference_state["mask_inputs_per_obj"][obj_idx]

        assert mask.dim() == 2
        mask_H, mask_W = mask.shape
        mask_inputs_orig = mask[None, None]  # add batch and channel dimension
        mask_inputs_orig = mask_inputs_orig.float().to(inference_state["device"])

        # resize the mask if it doesn't match the model's input mask size
        if mask_H != self.input_mask_size or mask_W != self.input_mask_size:
            mask_inputs = torch.nn.functional.interpolate(
                mask_inputs_orig,
                size=(self.input_mask_size, self.input_mask_size),
                align_corners=False,
                mode="bilinear",
                antialias=True,  # use antialias for downsampling
            )
        else:
            mask_inputs = mask_inputs_orig

        # also get the mask at the original video resolution (for outputting)
        video_H = inference_state["video_height"]
        video_W = inference_state["video_width"]
        if mask_H != video_H or mask_W != video_W:
            mask_inputs_video_res = torch.nn.functional.interpolate(
                mask_inputs_orig,
                size=(video_H, video_W),
                align_corners=False,
                mode="bilinear",
                antialias=True,  # use antialias for potential downsampling
            )
        else:
            mask_inputs_video_res = mask_inputs_orig
        # convert mask_inputs_video_res to binary (threshold at 0.5 as it is in range 0~1)
        mask_inputs_video_res = mask_inputs_video_res > 0.5

        mask_inputs_per_frame[frame_idx] = mask_inputs_video_res
        # A mask prompt replaces any point prompts previously added on this frame.
        point_inputs_per_frame.pop(frame_idx, None)
        # If this frame hasn't been tracked before, we treat it as an initial conditioning
        # frame, meaning that the input mask is used to generate segments on this frame
        # without using any memory from other frames, like in SAM. Otherwise (if it has
        # been tracked), the input mask will be used to correct the already tracked masks.
        is_init_cond_frame = frame_idx not in inference_state["frames_already_tracked"]
        # whether to track in reverse time order
        if is_init_cond_frame:
            reverse = False
        else:
            reverse = inference_state["frames_already_tracked"][frame_idx]["reverse"]
        obj_output_dict = inference_state["output_dict_per_obj"][obj_idx]
        obj_temp_output_dict = inference_state["temp_output_dict_per_obj"][obj_idx]
        # Add a frame to conditioning output if it's an initial conditioning frame or
        # if the model sees all frames receiving clicks/mask as conditioning frames.
        is_cond = is_init_cond_frame or self.add_all_frames_to_correct_as_cond
        storage_key = "cond_frame_outputs" if is_cond else "non_cond_frame_outputs"

        current_out, _ = self._run_single_frame_inference(
            inference_state=inference_state,
            output_dict=obj_output_dict,  # run on the slice of a single object
            frame_idx=frame_idx,
            batch_size=1,  # run on the slice of a single object
            is_init_cond_frame=is_init_cond_frame,
            point_inputs=None,
            mask_inputs=mask_inputs,
            reverse=reverse,
            # Skip the memory encoder when adding clicks or mask. We execute the memory encoder
            # at the beginning of `propagate_in_video` (after user finalize their clicks). This
            # allows us to enforce non-overlapping constraints on all objects before encoding
            # them into memory.
            run_mem_encoder=False,
        )
        # We directly use the input mask at video resolution as the output mask for a better
        # video editing experience (so that the masks don't change after each brushing).
        # Here NO_OBJ_SCORE is a large negative value to represent the background and
        # similarly -NO_OBJ_SCORE is a large positive value to represent the foreground.
        current_out["pred_masks"] = None
        current_out["pred_masks_video_res"] = torch.where(
            mask_inputs_video_res, -NO_OBJ_SCORE, NO_OBJ_SCORE
        )
        # Add the output to the output dict (to be used as future memory)
        obj_temp_output_dict[storage_key][frame_idx] = current_out
        # Remove the overlapping proportion of other objects' input masks on this frame,
        # so the freshly brushed object takes precedence where masks overlap.
        temp_output_dict_per_obj = inference_state["temp_output_dict_per_obj"]
        for obj_idx2, obj_temp_output_dict2 in temp_output_dict_per_obj.items():
            if obj_idx2 == obj_idx:
                continue
            current_out2 = obj_temp_output_dict2[storage_key].get(frame_idx, None)
            if current_out2 is not None and "pred_masks_video_res" in current_out2:
                current_out2["pred_masks_video_res"] = torch.where(
                    mask_inputs_video_res,
                    NO_OBJ_SCORE,
                    current_out2["pred_masks_video_res"],
                )

        # Resize the output mask to the original video resolution
        obj_ids = inference_state["obj_ids"]
        consolidated_out = self._consolidate_temp_output_across_obj(
            inference_state,
            frame_idx,
            is_cond=is_cond,
            run_mem_encoder=False,
            consolidate_at_video_res=True,
        )
        _, video_res_masks = self._get_orig_video_res_output(
            inference_state, consolidated_out["pred_masks_video_res"]
        )
        low_res_masks = None  # not needed by the demo
        return frame_idx, obj_ids, low_res_masks, video_res_masks
459
+
460
+ def add_new_points(self, *args, **kwargs):
461
+ """Deprecated method. Please use `add_new_points_or_box` instead."""
462
+ return self.add_new_points_or_box(*args, **kwargs)
463
+
464
+ def _get_orig_video_res_output(self, inference_state, any_res_masks):
465
+ """
466
+ Resize the object scores to the original video resolution (video_res_masks)
467
+ and apply non-overlapping constraints for final output.
468
+ """
469
+ device = inference_state["device"]
470
+ video_H = inference_state["video_height"]
471
+ video_W = inference_state["video_width"]
472
+ any_res_masks = any_res_masks.to(device, non_blocking=True)
473
+ if any_res_masks.shape[-2:] == (video_H, video_W):
474
+ video_res_masks = any_res_masks
475
+ else:
476
+ video_res_masks = torch.nn.functional.interpolate(
477
+ any_res_masks,
478
+ size=(video_H, video_W),
479
+ mode="bilinear",
480
+ align_corners=False,
481
+ )
482
+ if self.non_overlap_masks_for_output:
483
+ video_res_masks = self._apply_non_overlapping_constraints(video_res_masks)
484
+ # potentially fill holes in the predicted masks
485
+ if self.fill_hole_area > 0:
486
+ video_res_masks = fill_holes_in_mask_scores(
487
+ video_res_masks, self.fill_hole_area
488
+ )
489
+ return any_res_masks, video_res_masks
490
+
491
    def _consolidate_temp_output_across_obj(
        self,
        inference_state,
        frame_idx,
        is_cond,
        run_mem_encoder,
        consolidate_at_video_res=False,
    ):
        """
        Consolidate the per-object temporary outputs in `temp_output_dict_per_obj` on
        a frame into a single output for all objects, including
        1) fill any missing objects either from `output_dict_per_obj` (if they exist in
           `output_dict_per_obj` for this frame) or leave them as placeholder values
           (if they don't exist in `output_dict_per_obj` for this frame);
        2) if specified, rerun memory encoder after applying non-overlapping constraints
           on the object scores.

        Args:
            inference_state: state dict created by `init_state`.
            frame_idx: frame whose per-object outputs are consolidated.
            is_cond: whether this frame is a conditioning frame (selects which
                storage key the temp outputs are read from).
            run_mem_encoder: if True, also re-encode the consolidated masks into
                memory features (requires low-resolution consolidation).
            consolidate_at_video_res: consolidate at the original video resolution
                instead of the model's low-res mask size (for mask-editing UX);
                incompatible with `run_mem_encoder`.

        Returns:
            dict with keys "maskmem_features", "maskmem_pos_enc", "obj_ptr",
            "object_score_logits", the consolidated mask tensor (under "pred_masks"
            or "pred_masks_video_res"), and optionally "iou_score".
        """
        batch_size = self._get_obj_num(inference_state)
        storage_key = "cond_frame_outputs" if is_cond else "non_cond_frame_outputs"
        # Optionally, we allow consolidating the temporary outputs at the original
        # video resolution (to provide a better editing experience for mask prompts).
        if consolidate_at_video_res:
            assert not run_mem_encoder, "memory encoder cannot run at video resolution"
            consolidated_H = inference_state["video_height"]
            consolidated_W = inference_state["video_width"]
            consolidated_mask_key = "pred_masks_video_res"
        else:
            consolidated_H = consolidated_W = self.low_res_mask_size
            consolidated_mask_key = "pred_masks"

        # Initialize `consolidated_out`. Its "maskmem_features" and "maskmem_pos_enc"
        # will be added when rerunning the memory encoder after applying non-overlapping
        # constraints to object scores. Its "pred_masks" are prefilled with a large
        # negative value (NO_OBJ_SCORE) to represent missing objects.
        consolidated_out = {
            "maskmem_features": None,
            "maskmem_pos_enc": None,
            consolidated_mask_key: torch.full(
                size=(batch_size, 1, consolidated_H, consolidated_W),
                fill_value=NO_OBJ_SCORE,
                dtype=torch.float32,
                device=inference_state["storage_device"],
            ),
            "obj_ptr": torch.full(
                size=(batch_size, self.hidden_dim),
                fill_value=NO_OBJ_SCORE,
                dtype=torch.float32,
                device=inference_state["device"],
            ),
            "object_score_logits": torch.full(
                size=(batch_size, 1),
                # default to 10.0 for object_score_logits, i.e. assuming the object is
                # present as sigmoid(10)=1, same as in `predict_masks` of `MaskDecoder`
                fill_value=10.0,
                dtype=torch.float32,
                device=inference_state["device"],
            ),
        }
        if self.use_memory_selection:
            # per-object IoU scores used by memory-frame selection (default 0.0 for
            # objects with no output on this frame)
            consolidated_out["iou_score"] = torch.full(
                size=(batch_size, 1),
                fill_value=0.0,
                dtype=torch.float32,
                device=inference_state["device"],
            )
        # lazily-computed dummy pointer shared by all objects missing on this frame
        empty_mask_ptr = None
        for obj_idx in range(batch_size):
            obj_temp_output_dict = inference_state["temp_output_dict_per_obj"][obj_idx]
            obj_output_dict = inference_state["output_dict_per_obj"][obj_idx]
            out = obj_temp_output_dict[storage_key].get(frame_idx, None)
            # If the object doesn't appear in "temp_output_dict_per_obj" on this frame,
            # we fall back and look up its previous output in "output_dict_per_obj".
            # We look up both "cond_frame_outputs" and "non_cond_frame_outputs" in
            # "output_dict_per_obj" to find a previous output for this object.
            if out is None:
                out = obj_output_dict["cond_frame_outputs"].get(frame_idx, None)
            if out is None:
                out = obj_output_dict["non_cond_frame_outputs"].get(frame_idx, None)
            # If the object doesn't appear in "output_dict_per_obj" either, we skip it
            # and leave its mask scores to the default scores (i.e. the NO_OBJ_SCORE
            # placeholder above) and set its object pointer to be a dummy pointer.
            if out is None:
                # Fill in dummy object pointers for those objects without any inputs or
                # tracking outcomes on this frame (only do it under `run_mem_encoder=True`,
                # i.e. when we need to build the memory for tracking).
                if run_mem_encoder:
                    if empty_mask_ptr is None:
                        empty_mask_ptr = self._get_empty_mask_ptr(
                            inference_state, frame_idx
                        )
                    # fill object pointer with a dummy pointer (based on an empty mask)
                    consolidated_out["obj_ptr"][obj_idx : obj_idx + 1] = empty_mask_ptr
                continue
            # Add the temporary object output mask to consolidated output mask
            # (use "pred_masks_video_res" if it's available)
            obj_mask = out.get("pred_masks_video_res", out["pred_masks"])
            consolidated_pred_masks = consolidated_out[consolidated_mask_key]
            if obj_mask.shape[-2:] == consolidated_pred_masks.shape[-2:]:
                consolidated_pred_masks[obj_idx : obj_idx + 1] = obj_mask
            else:
                # Resize first if temporary object mask has a different resolution
                is_downsampling = "pred_masks_video_res" in out
                resized_obj_mask = torch.nn.functional.interpolate(
                    obj_mask,
                    size=consolidated_pred_masks.shape[-2:],
                    mode="bilinear",
                    align_corners=False,
                    antialias=is_downsampling,  # use antialias for downsampling
                )
                consolidated_pred_masks[obj_idx : obj_idx + 1] = resized_obj_mask
            consolidated_out["obj_ptr"][obj_idx : obj_idx + 1] = out["obj_ptr"]
            consolidated_out["object_score_logits"][obj_idx : obj_idx + 1] = out[
                "object_score_logits"
            ]
            if self.use_memory_selection:
                consolidated_out["iou_score"][obj_idx : obj_idx + 1] = out["iou_score"]
        # Optionally, apply non-overlapping constraints on the consolidated scores
        # and rerun the memory encoder
        if run_mem_encoder:
            device = inference_state["device"]
            high_res_masks = torch.nn.functional.interpolate(
                consolidated_out["pred_masks"].to(device, non_blocking=True),
                size=(self.image_size, self.image_size),
                mode="bilinear",
                align_corners=False,
            )
            high_res_masks = self._apply_non_overlapping_constraints(high_res_masks)
            maskmem_features, maskmem_pos_enc = self._run_memory_encoder(
                inference_state=inference_state,
                frame_idx=frame_idx,
                batch_size=batch_size,
                high_res_masks=high_res_masks,
                object_score_logits=consolidated_out["object_score_logits"],
                is_mask_from_pts=True,  # these frames are what the user interacted with
            )
            consolidated_out["maskmem_features"] = maskmem_features
            consolidated_out["maskmem_pos_enc"] = maskmem_pos_enc

        return consolidated_out
630
+
631
    def _get_empty_mask_ptr(self, inference_state, frame_idx):
        """
        Get a dummy object pointer based on an empty mask on the current frame.

        Used by `_consolidate_temp_output_across_obj` to fill in pointers for
        objects that have no inputs or tracking outcomes on this frame.
        """
        # A dummy (empty) mask with a single object
        batch_size = 1
        mask_inputs = torch.zeros(
            (batch_size, 1, self.image_size, self.image_size),
            dtype=torch.float32,
            device=inference_state["device"],
        )

        # Retrieve correct image features
        (
            image,
            _,
            current_vision_feats,
            current_vision_pos_embeds,
            feat_sizes,
        ) = self._get_image_feature(inference_state, frame_idx, batch_size)

        # Feed the empty mask and image feature above to get a dummy object pointer.
        # An empty output_dict is passed so no memory from other frames is used,
        # and the memory encoder is skipped (`run_mem_encoder=False`).
        current_out = self.track_step(
            frame_idx=frame_idx,
            is_init_cond_frame=True,
            current_vision_feats=current_vision_feats,
            current_vision_pos_embeds=current_vision_pos_embeds,
            feat_sizes=feat_sizes,
            image=image,
            point_inputs=None,
            mask_inputs=mask_inputs,
            output_dict={
                "cond_frame_outputs": {},
                "non_cond_frame_outputs": {},
            },
            num_frames=inference_state["num_frames"],
            track_in_reverse=False,
            run_mem_encoder=False,
            prev_sam_mask_logits=None,
        )
        # only the object pointer of the dummy prediction is needed
        return current_out["obj_ptr"]
670
+
671
@torch.inference_mode()
def propagate_in_video_preflight(self, inference_state, run_mem_encoder=True):
    """Prepare inference_state and consolidate temporary outputs before tracking.

    Merges the per-object temporary outputs (produced by point/mask interactions)
    into the shared `output_dict`, records which frames were consolidated, and
    determines the first annotated frame that tracking should start from.

    Args:
        inference_state: the per-session state dict for this video.
        run_mem_encoder: whether to (re)run the memory encoder when consolidating
            the per-object temporary outputs.
    """
    # Tracking has started and we don't allow adding new objects until session is reset.
    inference_state["tracking_has_started"] = True
    batch_size = self._get_obj_num(inference_state)

    # Consolidate per-object temporary outputs in "temp_output_dict_per_obj" and
    # add them into "output_dict".
    temp_output_dict_per_obj = inference_state["temp_output_dict_per_obj"]
    output_dict = inference_state["output_dict"]
    # "consolidated_frame_inds" contains indices of those frames where consolidated
    # temporary outputs have been added (either in this call or any previous calls
    # to `propagate_in_video_preflight`).
    consolidated_frame_inds = inference_state["consolidated_frame_inds"]
    for is_cond in [False, True]:
        # Separately consolidate conditioning and non-conditioning temp outputs
        storage_key = "cond_frame_outputs" if is_cond else "non_cond_frame_outputs"
        # Find all the frames that contain temporary outputs for any objects
        # (these should be the frames that have just received clicks for mask inputs
        # via `add_new_points` or `add_new_mask`)
        temp_frame_inds = set()
        for obj_temp_output_dict in temp_output_dict_per_obj.values():
            temp_frame_inds.update(obj_temp_output_dict[storage_key].keys())
        consolidated_frame_inds[storage_key].update(temp_frame_inds)
        # consolidate the temporary output across all objects on this frame
        for frame_idx in temp_frame_inds:
            consolidated_out = self._consolidate_temp_output_across_obj(
                inference_state,
                frame_idx,
                is_cond=is_cond,
                run_mem_encoder=run_mem_encoder,
            )
            # merge them into "output_dict" and also create per-object slices
            output_dict[storage_key][frame_idx] = consolidated_out
            self._add_output_per_object(
                inference_state, frame_idx, consolidated_out, storage_key
            )
            # Non-conditioning memory is only cleared for single-object sessions
            # unless explicitly enabled for multi-object tracking.
            clear_non_cond_mem = self.clear_non_cond_mem_around_input and (
                self.clear_non_cond_mem_for_multi_obj or batch_size <= 1
            )
            if clear_non_cond_mem:
                # clear non-conditioning memory of the surrounding frames
                self._clear_non_cond_mem_around_input(inference_state, frame_idx)

        # clear temporary outputs in `temp_output_dict_per_obj`
        for obj_temp_output_dict in temp_output_dict_per_obj.values():
            obj_temp_output_dict[storage_key].clear()

    # edge case: if an output is added to "cond_frame_outputs", we remove any prior
    # output on the same frame in "non_cond_frame_outputs"
    for frame_idx in output_dict["cond_frame_outputs"]:
        output_dict["non_cond_frame_outputs"].pop(frame_idx, None)
    for obj_output_dict in inference_state["output_dict_per_obj"].values():
        for frame_idx in obj_output_dict["cond_frame_outputs"]:
            obj_output_dict["non_cond_frame_outputs"].pop(frame_idx, None)
    for frame_idx in consolidated_frame_inds["cond_frame_outputs"]:
        assert frame_idx in output_dict["cond_frame_outputs"]
        consolidated_frame_inds["non_cond_frame_outputs"].discard(frame_idx)

    # Make sure that the frame indices in "consolidated_frame_inds" are exactly those frames
    # with either points or mask inputs (which should be true under a correct demo workflow).
    all_consolidated_frame_inds = (
        consolidated_frame_inds["cond_frame_outputs"]
        | consolidated_frame_inds["non_cond_frame_outputs"]
    )
    input_frames_inds = set()
    for point_inputs_per_frame in inference_state["point_inputs_per_obj"].values():
        input_frames_inds.update(point_inputs_per_frame.keys())
    for mask_inputs_per_frame in inference_state["mask_inputs_per_obj"].values():
        input_frames_inds.update(mask_inputs_per_frame.keys())
    assert all_consolidated_frame_inds == input_frames_inds
    # Record the first interacted frame index (for tracking start)
    if inference_state["first_ann_frame_idx"] is None:
        inference_state["first_ann_frame_idx"] = min(
            input_frames_inds, default=None
        )
    # In case `first_ann_frame_idx` is not in the conditioning frames (e.g. because
    # we cleared the input points on that frame), pick the first conditioning frame
    if (
        inference_state["first_ann_frame_idx"]
        not in output_dict["cond_frame_outputs"]
    ):
        inference_state["first_ann_frame_idx"] = min(
            output_dict["cond_frame_outputs"], default=None
        )
+
758
+ def _get_processing_order(
759
+ self, inference_state, start_frame_idx, max_frame_num_to_track, reverse
760
+ ):
761
+ num_frames = inference_state["num_frames"]
762
+ # set start index, end index, and processing order
763
+ if self.always_start_from_first_ann_frame:
764
+ # in this case, we always start tracking from the frame where we receive
765
+ # the initial annotation and ignore the provided start_frame_idx
766
+ start_frame_idx = inference_state["first_ann_frame_idx"]
767
+ if start_frame_idx is None:
768
+ # default: start from the earliest frame with input points
769
+ start_frame_idx = min(inference_state["output_dict"]["cond_frame_outputs"])
770
+ if max_frame_num_to_track is None:
771
+ # default: track all the frames in the video
772
+ max_frame_num_to_track = num_frames
773
+ if reverse:
774
+ end_frame_idx = max(start_frame_idx - max_frame_num_to_track, 0)
775
+ if start_frame_idx > 0:
776
+ processing_order = range(start_frame_idx, end_frame_idx - 1, -1)
777
+ else:
778
+ # this is the edge case where we start from frame 0 and track in reverse order;
779
+ # in this case, we track a single frame (frame 0)
780
+ processing_order = [0]
781
+ else:
782
+ end_frame_idx = min(
783
+ start_frame_idx + max_frame_num_to_track, num_frames - 1
784
+ )
785
+ processing_order = range(start_frame_idx, end_frame_idx + 1)
786
+ return processing_order
787
+
788
@torch.inference_mode()
def propagate_in_video(
    self,
    inference_state,
    start_frame_idx,
    max_frame_num_to_track,
    reverse,
    tqdm_disable=False,
    obj_ids=None,
    run_mem_encoder=True,
    propagate_preflight=False,
):
    """Propagate the input points across frames to track in the entire video.

    Yields, for every frame in the processing order, a tuple
    `(frame_idx, obj_ids, low_res_masks, video_res_masks, obj_scores)`.

    Args:
        inference_state: the per-session state dict for this video.
        start_frame_idx: frame to start from, or None for the earliest
            conditioning frame (see `_get_processing_order`).
        max_frame_num_to_track: cap on the number of frames, or None for all.
        reverse: track backwards in time if True.
        tqdm_disable: disable the progress bar.
        obj_ids: must be None; per-object selection is not supported here.
        run_mem_encoder: whether to run the memory encoder on newly tracked frames.
        propagate_preflight: run `propagate_in_video_preflight` first if True.

    Raises:
        NotImplementedError: if `obj_ids` is provided.
        RuntimeError: if no conditioning frames (points/masks) exist yet.
    """
    if propagate_preflight:
        self.propagate_in_video_preflight(inference_state)
    # NOTE: This is a copy from the parent class, except that we return object scores as well.
    output_dict = inference_state["output_dict"]
    consolidated_frame_inds = inference_state["consolidated_frame_inds"]
    if obj_ids is not None:
        # BUGFIX: the original message was garbled
        # ("Per-object tracking yet for batched inference if not implemented.").
        raise NotImplementedError(
            "Per-object tracking is not yet implemented for batched inference."
        )
    obj_ids = inference_state["obj_ids"]
    batch_size = self._get_obj_num(inference_state)
    if len(output_dict["cond_frame_outputs"]) == 0:
        raise RuntimeError("No points are provided; please add points first")
    # Non-conditioning memory is only cleared for single-object sessions unless
    # explicitly enabled for multi-object tracking.
    clear_non_cond_mem = self.clear_non_cond_mem_around_input and (
        self.clear_non_cond_mem_for_multi_obj or batch_size <= 1
    )

    processing_order = self._get_processing_order(
        inference_state,
        start_frame_idx,
        max_frame_num_to_track,
        reverse,
    )

    for frame_idx in tqdm(
        processing_order, desc="propagate in video", disable=tqdm_disable
    ):
        # We skip those frames already in consolidated outputs (these are frames
        # that received input clicks or mask). Note that we cannot directly run
        # batched forward on them via `_run_single_frame_inference` because the
        # number of clicks on each object might be different.
        if frame_idx in consolidated_frame_inds["cond_frame_outputs"]:
            storage_key = "cond_frame_outputs"
            current_out = output_dict[storage_key][frame_idx]
            pred_masks = current_out["pred_masks"]
            obj_scores = current_out["object_score_logits"]
            if clear_non_cond_mem:
                # clear non-conditioning memory of the surrounding frames
                self._clear_non_cond_mem_around_input(inference_state, frame_idx)
        elif frame_idx in consolidated_frame_inds["non_cond_frame_outputs"]:
            storage_key = "non_cond_frame_outputs"
            current_out = output_dict[storage_key][frame_idx]
            pred_masks = current_out["pred_masks"]
            obj_scores = current_out["object_score_logits"]
        else:
            # A frame without user input: run regular (batched) tracking.
            storage_key = "non_cond_frame_outputs"
            current_out, pred_masks = self._run_single_frame_inference(
                inference_state=inference_state,
                output_dict=output_dict,
                frame_idx=frame_idx,
                batch_size=batch_size,
                is_init_cond_frame=False,
                point_inputs=None,
                mask_inputs=None,
                reverse=reverse,
                run_mem_encoder=run_mem_encoder,
            )
            obj_scores = current_out["object_score_logits"]
            output_dict[storage_key][frame_idx] = current_out
        # Create slices of per-object outputs for subsequent interaction with each
        # individual object after tracking.
        self._add_output_per_object(
            inference_state, frame_idx, current_out, storage_key
        )
        inference_state["frames_already_tracked"][frame_idx] = {"reverse": reverse}

        # Resize the output mask to the original video resolution (we directly use
        # the mask scores on GPU for output to avoid any CPU conversion in between)
        low_res_masks, video_res_masks = self._get_orig_video_res_output(
            inference_state, pred_masks
        )
        yield frame_idx, obj_ids, low_res_masks, video_res_masks, obj_scores
874
+ def _add_output_per_object(
875
+ self, inference_state, frame_idx, current_out, storage_key
876
+ ):
877
+ """
878
+ Split a multi-object output into per-object output slices and add them into
879
+ `output_dict_per_obj`. The resulting slices share the same tensor storage.
880
+ """
881
+ maskmem_features = current_out["maskmem_features"]
882
+ assert maskmem_features is None or isinstance(maskmem_features, torch.Tensor)
883
+
884
+ maskmem_pos_enc = current_out["maskmem_pos_enc"]
885
+ assert maskmem_pos_enc is None or isinstance(maskmem_pos_enc, list)
886
+
887
+ output_dict_per_obj = inference_state["output_dict_per_obj"]
888
+ for obj_idx, obj_output_dict in output_dict_per_obj.items():
889
+ obj_slice = slice(obj_idx, obj_idx + 1)
890
+ obj_out = {
891
+ "maskmem_features": None,
892
+ "maskmem_pos_enc": None,
893
+ "pred_masks": current_out["pred_masks"][obj_slice],
894
+ "obj_ptr": current_out["obj_ptr"][obj_slice],
895
+ "object_score_logits": current_out["object_score_logits"][obj_slice],
896
+ }
897
+ if self.use_memory_selection:
898
+ obj_out["iou_score"] = current_out["iou_score"][obj_slice]
899
+ if maskmem_features is not None:
900
+ obj_out["maskmem_features"] = maskmem_features[obj_slice]
901
+ if maskmem_pos_enc is not None:
902
+ obj_out["maskmem_pos_enc"] = [x[obj_slice] for x in maskmem_pos_enc]
903
+ obj_output_dict[storage_key][frame_idx] = obj_out
904
+
905
@torch.inference_mode()
def clear_all_points_in_frame(
    self, inference_state, frame_idx, obj_id, need_output=True
):
    """Remove all input points or mask in a specific frame for a given object.

    If no object has any remaining input on this frame, the frame's conditioning
    output is downgraded to a non-conditioning one; when no conditioning frames
    remain at all, all tracking results are reset.

    Args:
        inference_state: the per-session state dict for this video.
        frame_idx: the frame whose inputs should be cleared.
        obj_id: client-side id of the object whose inputs are cleared.
        need_output: if True, re-consolidate and return the updated masks.

    Returns:
        None if `need_output` is False; otherwise a tuple
        `(frame_idx, obj_ids, low_res_masks, video_res_masks)` where
        `low_res_masks` is always None.
    """
    obj_idx = self._obj_id_to_idx(inference_state, obj_id)

    # Clear the conditioning information on the given frame
    inference_state["point_inputs_per_obj"][obj_idx].pop(frame_idx, None)
    inference_state["mask_inputs_per_obj"][obj_idx].pop(frame_idx, None)

    temp_output_dict_per_obj = inference_state["temp_output_dict_per_obj"]
    temp_output_dict_per_obj[obj_idx]["cond_frame_outputs"].pop(frame_idx, None)
    temp_output_dict_per_obj[obj_idx]["non_cond_frame_outputs"].pop(frame_idx, None)

    # Check and see if there are still any inputs left on this frame
    batch_size = self._get_obj_num(inference_state)
    frame_has_input = False
    for obj_idx2 in range(batch_size):
        if frame_idx in inference_state["point_inputs_per_obj"][obj_idx2]:
            frame_has_input = True
            break
        if frame_idx in inference_state["mask_inputs_per_obj"][obj_idx2]:
            frame_has_input = True
            break

    # If this frame has no remaining inputs for any objects, we further clear its
    # conditioning frame status
    if not frame_has_input:
        output_dict = inference_state["output_dict"]
        consolidated_frame_inds = inference_state["consolidated_frame_inds"]
        consolidated_frame_inds["cond_frame_outputs"].discard(frame_idx)
        consolidated_frame_inds["non_cond_frame_outputs"].discard(frame_idx)
        # Remove the frame's conditioning output (possibly downgrading it to non-conditioning)
        out = output_dict["cond_frame_outputs"].pop(frame_idx, None)
        if out is not None:
            # The frame is not a conditioning frame anymore since it's not receiving inputs,
            # so we "downgrade" its output (if exists) to a non-conditioning frame output.
            output_dict["non_cond_frame_outputs"][frame_idx] = out
            inference_state["frames_already_tracked"].pop(frame_idx, None)
        # Similarly, do it for the sliced output on each object.
        for obj_idx2 in range(batch_size):
            obj_output_dict = inference_state["output_dict_per_obj"][obj_idx2]
            obj_out = obj_output_dict["cond_frame_outputs"].pop(frame_idx, None)
            if obj_out is not None:
                obj_output_dict["non_cond_frame_outputs"][frame_idx] = obj_out

        # If all the conditioning frames have been removed, we also clear the tracking outputs
        if len(output_dict["cond_frame_outputs"]) == 0:
            self._reset_tracking_results(inference_state)

    if not need_output:
        return
    # Finally, output updated masks per object (after removing the inputs above)
    obj_ids = inference_state["obj_ids"]
    # The frame is still a conditioning frame if any object keeps a temp cond output on it.
    is_cond = any(
        frame_idx in obj_temp_output_dict["cond_frame_outputs"]
        for obj_temp_output_dict in temp_output_dict_per_obj.values()
    )
    consolidated_out = self._consolidate_temp_output_across_obj(
        inference_state,
        frame_idx,
        is_cond=is_cond,
        run_mem_encoder=False,
        consolidate_at_video_res=True,
    )
    _, video_res_masks = self._get_orig_video_res_output(
        inference_state, consolidated_out["pred_masks_video_res"]
    )
    low_res_masks = None  # not needed by the demo
    return frame_idx, obj_ids, low_res_masks, video_res_masks
+
977
@torch.inference_mode()
def clear_all_points_in_video(self, inference_state):
    """Remove all input points or mask in all frames throughout the video."""
    # First wipe all tracking inputs/results, then forget the objects themselves.
    self._reset_tracking_results(inference_state)
    for key in (
        "obj_id_to_idx",
        "obj_idx_to_id",
        "obj_ids",
        "point_inputs_per_obj",
        "mask_inputs_per_obj",
        "output_dict_per_obj",
        "temp_output_dict_per_obj",
    ):
        inference_state[key].clear()
+
990
+ def _reset_tracking_results(self, inference_state):
991
+ """Reset all tracking inputs and results across the videos."""
992
+ for v in inference_state["point_inputs_per_obj"].values():
993
+ v.clear()
994
+ for v in inference_state["mask_inputs_per_obj"].values():
995
+ v.clear()
996
+ for v in inference_state["output_dict_per_obj"].values():
997
+ v["cond_frame_outputs"].clear()
998
+ v["non_cond_frame_outputs"].clear()
999
+ for v in inference_state["temp_output_dict_per_obj"].values():
1000
+ v["cond_frame_outputs"].clear()
1001
+ v["non_cond_frame_outputs"].clear()
1002
+ inference_state["output_dict"]["cond_frame_outputs"].clear()
1003
+ inference_state["output_dict"]["non_cond_frame_outputs"].clear()
1004
+ inference_state["consolidated_frame_inds"]["cond_frame_outputs"].clear()
1005
+ inference_state["consolidated_frame_inds"]["non_cond_frame_outputs"].clear()
1006
+ inference_state["tracking_has_started"] = False
1007
+ inference_state["frames_already_tracked"].clear()
1008
+ inference_state["first_ann_frame_idx"] = None
1009
+
1010
def _get_image_feature(self, inference_state, frame_idx, batch_size):
    """Compute the image features on a given frame.

    Uses a single-entry feature cache: on a cache miss the backbone is run on
    the frame and the cache is replaced with this frame's features only.
    The per-frame features are then expanded (as views) to `batch_size` so
    downstream code can run one row per tracked object.

    Args:
        inference_state: the per-session state dict for this video.
        frame_idx: index of the frame to featurize.
        batch_size: number of tracked objects to expand the features to.

    Returns:
        A tuple `(expanded_image,) + self._prepare_backbone_features(...)`.

    Raises:
        RuntimeError: if features are not cached and `self.backbone` is None.
    """
    # Look up in the cache
    image, backbone_out = inference_state["cached_features"].get(
        frame_idx, (None, None)
    )
    if backbone_out is None:
        if self.backbone is None:
            # Without a backbone we can only serve pre-computed (cached) features.
            raise RuntimeError(
                f"Image features for frame {frame_idx} are not cached. "
                "Please run inference on this frame first."
            )
        else:
            # Cache miss -- we will run inference on a single image
            # (note: frames are moved to GPU here via `.cuda()`).
            image = inference_state["images"][frame_idx].cuda().float().unsqueeze(0)
            backbone_out = self.forward_image(image)
            # Cache the most recent frame's feature (for repeated interactions with
            # a frame; we can use an LRU cache for more frames in the future).
            inference_state["cached_features"] = {frame_idx: (image, backbone_out)}
    if "tracker_backbone_out" in backbone_out:
        backbone_out = backbone_out["tracker_backbone_out"]  # get backbone output

    # expand the features to have the same dimension as the number of objects
    expanded_image = image.expand(batch_size, -1, -1, -1)
    # shallow-copy the lists so the cached entries are not mutated in place
    expanded_backbone_out = {
        "backbone_fpn": backbone_out["backbone_fpn"].copy(),
        "vision_pos_enc": backbone_out["vision_pos_enc"].copy(),
    }
    for i, feat in enumerate(expanded_backbone_out["backbone_fpn"]):
        feat = feat.expand(batch_size, -1, -1, -1)
        expanded_backbone_out["backbone_fpn"][i] = feat
    for i, pos in enumerate(expanded_backbone_out["vision_pos_enc"]):
        pos = pos.expand(batch_size, -1, -1, -1)
        expanded_backbone_out["vision_pos_enc"][i] = pos

    features = self._prepare_backbone_features(expanded_backbone_out)
    features = (expanded_image,) + features
    return features
1048
+
1049
def _run_single_frame_inference(
    self,
    inference_state,
    output_dict,
    frame_idx,
    batch_size,
    is_init_cond_frame,
    point_inputs,
    mask_inputs,
    reverse,
    run_mem_encoder,
    prev_sam_mask_logits=None,
    use_prev_mem_frame=True,
):
    """Run tracking on a single frame based on current inputs and previous memory.

    Wraps `self.track_step` with feature retrieval before the call and output
    compaction/offloading after it.

    Returns:
        A tuple `(compact_current_out, pred_masks_gpu)` where the compact dict
        holds (possibly CPU-offloaded) memory features, masks, object pointers
        and object score logits, and `pred_masks_gpu` is the on-GPU mask tensor.
    """
    # Retrieve correct image features
    (
        image,
        _,
        current_vision_feats,
        current_vision_pos_embeds,
        feat_sizes,
    ) = self._get_image_feature(inference_state, frame_idx, batch_size)

    # point and mask should not appear as input simultaneously on the same frame
    assert point_inputs is None or mask_inputs is None
    current_out = self.track_step(
        frame_idx=frame_idx,
        is_init_cond_frame=is_init_cond_frame,
        current_vision_feats=current_vision_feats,
        current_vision_pos_embeds=current_vision_pos_embeds,
        feat_sizes=feat_sizes,
        image=image,
        point_inputs=point_inputs,
        mask_inputs=mask_inputs,
        output_dict=output_dict,
        num_frames=inference_state["num_frames"],
        track_in_reverse=reverse,
        run_mem_encoder=run_mem_encoder,
        prev_sam_mask_logits=prev_sam_mask_logits,
        use_prev_mem_frame=use_prev_mem_frame,
    )

    # optionally offload the output to CPU memory to save GPU space
    storage_device = inference_state["storage_device"]
    maskmem_features = current_out["maskmem_features"]
    if maskmem_features is not None:
        # bfloat16 halves the memory footprint of the stored features
        maskmem_features = maskmem_features.to(torch.bfloat16)
        maskmem_features = maskmem_features.to(storage_device, non_blocking=True)
    pred_masks_gpu = current_out["pred_masks"]
    pred_masks = pred_masks_gpu.to(storage_device, non_blocking=True)
    # "maskmem_pos_enc" is the same across frames, so we only need to store one copy of it
    maskmem_pos_enc = self._get_maskmem_pos_enc(inference_state, current_out)
    # object pointer is a small tensor, so we always keep it on GPU memory for fast access
    obj_ptr = current_out["obj_ptr"]
    object_score_logits = current_out["object_score_logits"]
    # make a compact version of this frame's output to reduce the state size
    compact_current_out = {
        "maskmem_features": maskmem_features,
        "maskmem_pos_enc": maskmem_pos_enc,
        "pred_masks": pred_masks,
        "obj_ptr": obj_ptr,
        "object_score_logits": object_score_logits,
    }
    if self.use_memory_selection:
        # extra per-frame scores used when selecting which memories to keep
        compact_current_out["iou_score"] = current_out["iou_score"]
        compact_current_out["eff_iou_score"] = current_out["eff_iou_score"]
    return compact_current_out, pred_masks_gpu
+
1118
def _run_memory_encoder(
    self,
    inference_state,
    frame_idx,
    batch_size,
    high_res_masks,
    object_score_logits,
    is_mask_from_pts,
):
    """
    Run the memory encoder on `high_res_masks`. This is usually after applying
    non-overlapping constraints to object scores. Since their scores changed, their
    memory also need to be computed again with the memory encoder.

    Returns:
        A tuple `(maskmem_features, maskmem_pos_enc)` with the features cast to
        bfloat16 and moved to the session's storage device.
    """
    # Retrieve correct image features
    image, _, current_vision_feats, _, feat_sizes = self._get_image_feature(
        inference_state, frame_idx, batch_size
    )
    maskmem_features, maskmem_pos_enc = self._encode_new_memory(
        image=image,
        current_vision_feats=current_vision_feats,
        feat_sizes=feat_sizes,
        pred_masks_high_res=high_res_masks,
        object_score_logits=object_score_logits,
        is_mask_from_pts=is_mask_from_pts,
    )

    # optionally offload the output to CPU memory to save GPU space
    storage_device = inference_state["storage_device"]
    maskmem_features = maskmem_features.to(torch.bfloat16)
    maskmem_features = maskmem_features.to(storage_device, non_blocking=True)
    # "maskmem_pos_enc" is the same across frames, so we only need to store one copy of it
    maskmem_pos_enc = self._get_maskmem_pos_enc(
        inference_state, {"maskmem_pos_enc": maskmem_pos_enc}
    )
    return maskmem_features, maskmem_pos_enc
+
1155
+ def _get_maskmem_pos_enc(self, inference_state, current_out):
1156
+ """
1157
+ `maskmem_pos_enc` is the same across frames and objects, so we cache it as
1158
+ a constant in the inference session to reduce session storage size.
1159
+ """
1160
+ model_constants = inference_state["constants"]
1161
+ # "out_maskmem_pos_enc" should be either a list of tensors or None
1162
+ out_maskmem_pos_enc = current_out["maskmem_pos_enc"]
1163
+ if out_maskmem_pos_enc is not None:
1164
+ if "maskmem_pos_enc" not in model_constants:
1165
+ assert isinstance(out_maskmem_pos_enc, list)
1166
+ # only take the slice for one object, since it's same across objects
1167
+ maskmem_pos_enc = [x[0:1].clone() for x in out_maskmem_pos_enc]
1168
+ model_constants["maskmem_pos_enc"] = maskmem_pos_enc
1169
+ else:
1170
+ maskmem_pos_enc = model_constants["maskmem_pos_enc"]
1171
+ # expand the cached maskmem_pos_enc to the actual batch size
1172
+ batch_size = out_maskmem_pos_enc[0].size(0)
1173
+ expanded_maskmem_pos_enc = [
1174
+ x.expand(batch_size, -1, -1, -1) for x in maskmem_pos_enc
1175
+ ]
1176
+ else:
1177
+ expanded_maskmem_pos_enc = None
1178
+ return expanded_maskmem_pos_enc
1179
+
1180
@torch.inference_mode()
def remove_object(self, inference_state, obj_id, strict=False, need_output=True):
    """
    Remove an object id from the tracking state. If strict is True, we check whether
    the object id actually exists and raise an error if it doesn't exist.

    Returns:
        A tuple `(obj_ids, updated_frames)` where `obj_ids` is the remaining
        object ids and `updated_frames` is a list of `(frame_idx, video_res_masks)`
        for frames whose outputs changed (only filled when `need_output` is True).
    """
    old_obj_idx_to_rm = inference_state["obj_id_to_idx"].get(obj_id, None)
    updated_frames = []
    # Check whether this object_id to remove actually exists and possibly raise an error.
    if old_obj_idx_to_rm is None:
        if not strict:
            # Non-strict mode: removing a missing id is a no-op.
            return inference_state["obj_ids"], updated_frames
        raise RuntimeError(
            f"Cannot remove object id {obj_id} as it doesn't exist. "
            f"All existing object ids: {inference_state['obj_ids']}."
        )

    # If this is the only remaining object id, we simply reset the state.
    if len(inference_state["obj_id_to_idx"]) == 1:
        self.clear_all_points_in_video(inference_state)
        return inference_state["obj_ids"], updated_frames

    # There are still remaining objects after removing this object id. In this case,
    # we need to delete the object storage from inference state tensors.
    # Step 0: clear the input on those frames where this object id has point or mask input
    # (note that this step is required as it might downgrade conditioning frames to
    # non-conditioning ones)
    obj_input_frames_inds = set()
    obj_input_frames_inds.update(
        inference_state["point_inputs_per_obj"][old_obj_idx_to_rm]
    )
    obj_input_frames_inds.update(
        inference_state["mask_inputs_per_obj"][old_obj_idx_to_rm]
    )
    for frame_idx in obj_input_frames_inds:
        self.clear_all_points_in_frame(
            inference_state, frame_idx, obj_id, need_output=False
        )

    # Step 1: Update the object id mapping (note that it must be done after Step 0,
    # since Step 0 still requires the old object id mappings in inference_state)
    old_obj_ids = inference_state["obj_ids"]
    old_obj_inds = list(range(len(old_obj_ids)))
    remain_old_obj_inds = old_obj_inds.copy()
    remain_old_obj_inds.remove(old_obj_idx_to_rm)
    new_obj_ids = [old_obj_ids[old_idx] for old_idx in remain_old_obj_inds]
    new_obj_inds = list(range(len(new_obj_ids)))
    # build new mappings
    old_idx_to_new_idx = dict(zip(remain_old_obj_inds, new_obj_inds))
    inference_state["obj_id_to_idx"] = dict(zip(new_obj_ids, new_obj_inds))
    inference_state["obj_idx_to_id"] = dict(zip(new_obj_inds, new_obj_ids))
    inference_state["obj_ids"] = new_obj_ids

    # Step 2: For per-object tensor storage, we shift their obj_idx in the dict keys.
    # (note that "consolidated_frame_inds" doesn't need to be updated in this step as
    # it's already handled in Step 0)
    def _map_keys(container):
        # Re-key a per-object dict from old object indices to the new, compacted ones;
        # the removed object's entry is dropped.
        new_kvs = []
        for k in old_obj_inds:
            v = container.pop(k)
            if k in old_idx_to_new_idx:
                new_kvs.append((old_idx_to_new_idx[k], v))
        container.update(new_kvs)

    _map_keys(inference_state["point_inputs_per_obj"])
    _map_keys(inference_state["mask_inputs_per_obj"])
    _map_keys(inference_state["output_dict_per_obj"])
    _map_keys(inference_state["temp_output_dict_per_obj"])

    # Step 3: For packed tensor storage, we index the remaining ids and rebuild the per-object slices.
    def _slice_state(output_dict, storage_key):
        # Drop the removed object's row from every packed per-frame tensor in place.
        for frame_idx, out in output_dict[storage_key].items():
            out["maskmem_features"] = out["maskmem_features"][remain_old_obj_inds]
            out["maskmem_pos_enc"] = [
                x[remain_old_obj_inds] for x in out["maskmem_pos_enc"]
            ]
            # "maskmem_pos_enc" is the same across frames, so we only need to store one copy of it
            out["maskmem_pos_enc"] = self._get_maskmem_pos_enc(inference_state, out)
            out["pred_masks"] = out["pred_masks"][remain_old_obj_inds]
            out["obj_ptr"] = out["obj_ptr"][remain_old_obj_inds]
            out["object_score_logits"] = out["object_score_logits"][
                remain_old_obj_inds
            ]
            if self.use_memory_selection:
                out["iou_score"] = out["iou_score"][remain_old_obj_inds]
                out["eff_iou_score"] = self.cal_mem_score(
                    out["object_score_logits"], out["iou_score"]
                )  # recalculate the memory frame score
            # also update the per-object slices
            self._add_output_per_object(
                inference_state, frame_idx, out, storage_key
            )

    _slice_state(inference_state["output_dict"], "cond_frame_outputs")
    _slice_state(inference_state["output_dict"], "non_cond_frame_outputs")

    # Step 4: Further collect the outputs on those frames in `obj_input_frames_inds`, which
    # could show an updated mask for objects previously occluded by the object being removed
    if need_output:
        temp_output_dict_per_obj = inference_state["temp_output_dict_per_obj"]
        for frame_idx in obj_input_frames_inds:
            is_cond = any(
                frame_idx in obj_temp_output_dict["cond_frame_outputs"]
                for obj_temp_output_dict in temp_output_dict_per_obj.values()
            )
            consolidated_out = self._consolidate_temp_output_across_obj(
                inference_state,
                frame_idx,
                is_cond=is_cond,
                run_mem_encoder=False,
                consolidate_at_video_res=True,
            )
            _, video_res_masks = self._get_orig_video_res_output(
                inference_state, consolidated_out["pred_masks_video_res"]
            )
            updated_frames.append((frame_idx, video_res_masks))

    return inference_state["obj_ids"], updated_frames
1298
+
1299
+ def _clear_non_cond_mem_around_input(self, inference_state, frame_idx):
1300
+ """
1301
+ Remove the non-conditioning memory around the input frame. When users provide
1302
+ correction clicks, the surrounding frames' non-conditioning memories can still
1303
+ contain outdated object appearance information and could confuse the model.
1304
+
1305
+ This method clears those non-conditioning memories surrounding the interacted
1306
+ frame to avoid giving the model both old and new information about the object.
1307
+ """
1308
+ r = self.memory_temporal_stride_for_eval
1309
+ frame_idx_begin = frame_idx - r * self.num_maskmem
1310
+ frame_idx_end = frame_idx + r * self.num_maskmem
1311
+ batch_size = self._get_obj_num(inference_state)
1312
+ for obj_idx in range(batch_size):
1313
+ obj_output_dict = inference_state["output_dict_per_obj"][obj_idx]
1314
+ non_cond_frame_outputs = obj_output_dict["non_cond_frame_outputs"]
1315
+ for t in range(frame_idx_begin, frame_idx_end + 1):
1316
+ non_cond_frame_outputs.pop(t, None)
1317
+
1318
+ def _suppress_shrinked_masks(
1319
+ self, pred_masks, new_pred_masks, shrink_threshold=0.3
1320
+ ):
1321
+ area_before = (pred_masks > 0).sum(dim=(-1, -2))
1322
+ area_after = (new_pred_masks > 0).sum(dim=(-1, -2))
1323
+ area_before = torch.clamp(area_before, min=1.0)
1324
+ area_ratio = area_after / area_before
1325
+ keep = area_ratio >= shrink_threshold
1326
+ keep_mask = keep[..., None, None].expand_as(pred_masks)
1327
+ pred_masks_after = torch.where(
1328
+ keep_mask, pred_masks, torch.clamp(pred_masks, max=-10.0)
1329
+ )
1330
+ return pred_masks_after
1331
+
1332
def _suppress_object_pw_area_shrinkage(self, pred_masks):
    """
    Suppress masks that shrink in area after applying the pixel-wise
    non-overlapping constraints. Note that the final output can still be
    overlapping: the constrained masks are only used as a reference to decide
    which objects to suppress entirely.
    """
    # Reference result: pixel-wise non-overlapping constraint based on mask scores.
    constrained = super()._apply_non_overlapping_constraints(pred_masks)
    # Fully suppress masks whose area shrank by a large factor (likely noisy).
    # This is a no-op when no mask shrank below the threshold.
    return self._suppress_shrinked_masks(pred_masks, constrained)
+
1348
def _apply_object_wise_non_overlapping_constraints(
    self, pred_masks, obj_scores, background_value=-10.0
):
    """
    Applies non-overlapping constraints object wise, i.e. within an overlapping
    region every pixel is claimed by the single object with the highest object
    score rather than resolving each pixel independently.
    """
    # Stamp each object's scalar score onto its positive pixels so the
    # pixel-wise constraint effectively compares whole objects.
    score_stamped = torch.where(
        pred_masks > 0, obj_scores[..., None, None], background_value
    )
    winner_masks = super()._apply_non_overlapping_constraints(score_stamped)
    # Keep the original logits only where this object won the pixel; elsewhere
    # clamp the logits down to the background value.
    return torch.where(
        winner_masks > 0,
        pred_masks,
        torch.clamp(pred_masks, max=background_value),
    )
source_code/sam3/sam3/model/sam3_video_inference.py ADDED
@@ -0,0 +1,1709 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
2
+
3
+ import logging
4
+ from collections import defaultdict
5
+
6
+ import numpy as np
7
+ import torch
8
+ import torch.distributed as dist
9
+ import torch.nn.functional as F
10
+
11
+ from sam3 import perflib
12
+ from sam3.logger import get_logger
13
+ from sam3.model.act_ckpt_utils import clone_output_wrapper
14
+ from sam3.model.box_ops import box_xywh_to_cxcywh, box_xyxy_to_xywh
15
+ from sam3.model.data_misc import BatchedDatapoint, convert_my_tensors, FindStage
16
+ from sam3.model.geometry_encoders import Prompt
17
+ from sam3.model.io_utils import IMAGE_EXTS, load_resource_as_video_frames
18
+ from sam3.model.sam3_tracker_utils import fill_holes_in_mask_scores
19
+ from sam3.model.sam3_video_base import MaskletConfirmationStatus, Sam3VideoBase
20
+ from sam3.model.utils.misc import copy_data_to_device
21
+ from sam3.perflib.compile import compile_wrapper, shape_logging_wrapper
22
+ from sam3.perflib.masks_ops import masks_to_boxes as perf_masks_to_boxes
23
+ from torchvision.ops import masks_to_boxes
24
+ from tqdm.auto import tqdm
25
+
26
+ logger = get_logger(__name__)
27
+
28
+
29
+ class Sam3VideoInference(Sam3VideoBase):
30
+ TEXT_ID_FOR_TEXT = 0
31
+ TEXT_ID_FOR_VISUAL = 1
32
+
33
+ def __init__(
34
+ self,
35
+ image_size=1008,
36
+ image_mean=(0.5, 0.5, 0.5),
37
+ image_std=(0.5, 0.5, 0.5),
38
+ compile_model=False,
39
+ **kwargs,
40
+ ):
41
+ """
42
+ hotstart_delay: int, the delay (in #frames) before the model starts to yield output, 0 to disable hotstart delay.
43
+ hotstart_unmatch_thresh: int, remove the object if it has this many unmatched frames within its hotstart_delay period.
44
+ If `hotstart_delay` is set to 0, this parameter is ignored.
45
+ hotstart_dup_thresh: int, remove the object if it has overlapped with another object this many frames within its hotstart_delay period.
46
+ """
47
+ super().__init__(**kwargs)
48
+ self.image_size = image_size
49
+ self.image_mean = image_mean
50
+ self.image_std = image_std
51
+ self.compile_model = compile_model
52
+
53
+ @torch.inference_mode()
54
+ def init_state(
55
+ self,
56
+ resource_path,
57
+ offload_video_to_cpu=False,
58
+ async_loading_frames=False,
59
+ video_loader_type="cv2",
60
+ ):
61
+ """Initialize an inference state from `resource_path` (an image or a video)."""
62
+ images, orig_height, orig_width = load_resource_as_video_frames(
63
+ resource_path=resource_path,
64
+ image_size=self.image_size,
65
+ offload_video_to_cpu=offload_video_to_cpu,
66
+ img_mean=self.image_mean,
67
+ img_std=self.image_std,
68
+ async_loading_frames=async_loading_frames,
69
+ video_loader_type=video_loader_type,
70
+ )
71
+ inference_state = {}
72
+ inference_state["image_size"] = self.image_size
73
+ inference_state["num_frames"] = len(images)
74
+ # the original video height and width, used for resizing final output scores
75
+ inference_state["orig_height"] = orig_height
76
+ inference_state["orig_width"] = orig_width
77
+ # values that don't change across frames (so we only need to hold one copy of them)
78
+ inference_state["constants"] = {}
79
+ # inputs on each frame
80
+ self._construct_initial_input_batch(inference_state, images)
81
+ # initialize extra states
82
+ inference_state["tracker_inference_states"] = []
83
+ inference_state["tracker_metadata"] = {}
84
+ inference_state["feature_cache"] = {}
85
+ inference_state["cached_frame_outputs"] = {}
86
+ inference_state["action_history"] = [] # for logging user actions
87
+ inference_state["is_image_only"] = is_image_type(resource_path)
88
+ return inference_state
89
+
90
+ @torch.inference_mode()
91
+ def reset_state(self, inference_state):
92
+ """Revert `inference_state` to what it was right after initialization."""
93
+ inference_state["input_batch"].find_text_batch[0] = "<text placeholder>"
94
+ inference_state["text_prompt"] = None
95
+ for t in range(inference_state["num_frames"]):
96
+ inference_state["input_batch"].find_inputs[t].text_ids[...] = 0
97
+ # constructing an output list in inference state (we start with an empty list)
98
+ inference_state["previous_stages_out"][t] = None
99
+ inference_state["per_frame_raw_point_input"][t] = None
100
+ inference_state["per_frame_raw_box_input"][t] = None
101
+ inference_state["per_frame_visual_prompt"][t] = None
102
+ inference_state["per_frame_geometric_prompt"][t] = None
103
+ inference_state["per_frame_cur_step"][t] = 0
104
+
105
+ inference_state["visual_prompt_embed"] = None
106
+ inference_state["visual_prompt_mask"] = None
107
+ inference_state["tracker_inference_states"].clear()
108
+ inference_state["tracker_metadata"].clear()
109
+ inference_state["feature_cache"].clear()
110
+ inference_state["cached_frame_outputs"].clear()
111
+ inference_state["action_history"].clear() # for logging user actions
112
+
113
    def _construct_initial_input_batch(self, inference_state, images):
        """Construct an initial `BatchedDatapoint` instance as input.

        Fills `inference_state` in place with:
          - "input_batch": the batched per-frame inputs (on `self.device`),
          - "constants.empty_geometric_prompt": a zero-length `Prompt` placeholder,
          - per-frame output/prompt placeholder lists, all initialized empty.
        """
        # 1) img_batch
        num_frames = len(images)
        device = self.device

        # 2) find_text_batch
        # "<text placeholder>" will be replaced by the actual text prompt when adding prompts
        find_text_batch = ["<text placeholder>", "visual"]

        # 3) find_inputs
        input_box_embedding_dim = 258  # historical default
        input_points_embedding_dim = 257  # historical default
        # One FindStage per frame; text_ids=[0] points at the text slot
        # (TEXT_ID_FOR_TEXT) in find_text_batch, boxes/points start empty.
        stages = [
            FindStage(
                img_ids=[stage_id],
                text_ids=[0],
                input_boxes=[torch.zeros(input_box_embedding_dim)],
                input_boxes_mask=[torch.empty(0, dtype=torch.bool)],
                input_boxes_label=[torch.empty(0, dtype=torch.long)],
                input_points=[torch.empty(0, input_points_embedding_dim)],
                input_points_mask=[torch.empty(0)],
                object_ids=[],
            )
            for stage_id in range(num_frames)
        ]
        for i in range(len(stages)):
            stages[i] = convert_my_tensors(stages[i])

        # construct the final `BatchedDatapoint` and cast to GPU
        input_batch = BatchedDatapoint(
            img_batch=images,
            find_text_batch=find_text_batch,
            find_inputs=stages,
            find_targets=[None] * num_frames,
            find_metadatas=[None] * num_frames,
        )
        input_batch = copy_data_to_device(input_batch, device, non_blocking=True)
        inference_state["input_batch"] = input_batch

        # construct the placeholder interactive prompts and tracking queries
        bs = 1
        inference_state["constants"]["empty_geometric_prompt"] = Prompt(
            box_embeddings=torch.zeros(0, bs, 4, device=device),
            box_mask=torch.zeros(bs, 0, device=device, dtype=torch.bool),
            box_labels=torch.zeros(0, bs, device=device, dtype=torch.long),
            point_embeddings=torch.zeros(0, bs, 2, device=device),
            point_mask=torch.zeros(bs, 0, device=device, dtype=torch.bool),
            point_labels=torch.zeros(0, bs, device=device, dtype=torch.long),
        )

        # constructing an output list in inference state (we start with an empty list)
        inference_state["previous_stages_out"] = [None] * num_frames
        inference_state["text_prompt"] = None
        inference_state["per_frame_raw_point_input"] = [None] * num_frames
        inference_state["per_frame_raw_box_input"] = [None] * num_frames
        inference_state["per_frame_visual_prompt"] = [None] * num_frames
        inference_state["per_frame_geometric_prompt"] = [None] * num_frames
        inference_state["per_frame_cur_step"] = [0] * num_frames

        # placeholders for cached outputs
        # (note: currently, a single visual prompt embedding is shared for all frames)
        inference_state["visual_prompt_embed"] = None
        inference_state["visual_prompt_mask"] = None
177
+
178
+ def _get_visual_prompt(self, inference_state, frame_idx, boxes_cxcywh, box_labels):
179
+ """
180
+ Handle the case of visual prompt. Currently, in the inference API we do not
181
+ explicitly distinguish between initial box as visual prompt vs subsequent boxes
182
+ or boxes after inference for refinement.
183
+ """
184
+ # If the frame hasn't had any inference results before (prompting or propagation),
185
+ # we treat the first added box prompt as a visual prompt; otherwise, we treat
186
+ # the first box just as a refinement prompt.
187
+ is_new_visual_prompt = (
188
+ inference_state["per_frame_visual_prompt"][frame_idx] is None
189
+ and inference_state["previous_stages_out"][frame_idx] is None
190
+ )
191
+ if is_new_visual_prompt:
192
+ if boxes_cxcywh.size(0) != 1:
193
+ raise RuntimeError(
194
+ "visual prompts (box as an initial prompt) should only have one box, "
195
+ f"but got {boxes_cxcywh.shape=}"
196
+ )
197
+ if not box_labels.item():
198
+ logging.warning("A negative box is added as a visual prompt.")
199
+ # take the first box prompt as a visual prompt
200
+ device = self.device
201
+ new_visual_prompt = Prompt(
202
+ box_embeddings=boxes_cxcywh[None, 0:1, :].to(device), # (seq, bs, 4)
203
+ box_mask=None,
204
+ box_labels=box_labels[None, 0:1].to(device), # (seq, bs)
205
+ point_embeddings=None,
206
+ point_mask=None,
207
+ point_labels=None,
208
+ )
209
+ inference_state["per_frame_visual_prompt"][frame_idx] = new_visual_prompt
210
+ else:
211
+ new_visual_prompt = None
212
+
213
+ # `boxes_cxcywh` and `box_labels` contains all the raw box inputs added so far
214
+ # strip any visual prompt from the input boxes (for geometric prompt encoding)
215
+ if inference_state["per_frame_visual_prompt"][frame_idx] is not None:
216
+ boxes_cxcywh = boxes_cxcywh[1:]
217
+ box_labels = box_labels[1:]
218
+
219
+ return boxes_cxcywh, box_labels, new_visual_prompt
220
+
221
+ def _get_processing_order(
222
+ self, inference_state, start_frame_idx, max_frame_num_to_track, reverse
223
+ ):
224
+ num_frames = inference_state["num_frames"]
225
+ previous_stages_out = inference_state["previous_stages_out"]
226
+ if all(out is None for out in previous_stages_out) and start_frame_idx is None:
227
+ raise RuntimeError(
228
+ "No prompts are received on any frames. Please add prompt on at least one frame before propagation."
229
+ )
230
+ # set start index, end index, and processing order
231
+ if start_frame_idx is None:
232
+ # default: start from the earliest frame with input points
233
+ start_frame_idx = min(
234
+ t for t, out in enumerate(previous_stages_out) if out is not None
235
+ )
236
+ if max_frame_num_to_track is None:
237
+ # default: track all the frames in the video
238
+ max_frame_num_to_track = num_frames
239
+ if reverse:
240
+ end_frame_idx = start_frame_idx - max_frame_num_to_track
241
+ end_frame_idx = max(end_frame_idx, 0)
242
+ processing_order = range(start_frame_idx - 1, end_frame_idx - 1, -1)
243
+ else:
244
+ end_frame_idx = start_frame_idx + max_frame_num_to_track
245
+ end_frame_idx = min(end_frame_idx, num_frames - 1)
246
+ processing_order = range(start_frame_idx, end_frame_idx + 1)
247
+ return processing_order, end_frame_idx
248
+
249
    @torch.inference_mode()
    def propagate_in_video(
        self,
        inference_state,
        start_frame_idx=None,
        max_frame_num_to_track=None,
        reverse=False,
    ):
        """
        Propagate the prompts to get grounding results for the entire video. This method
        is a generator and yields inference outputs for all frames in the range specified
        by `start_frame_idx`, `max_frame_num_to_track`, and `reverse`.

        Yields:
            (frame_idx, postprocessed_out) pairs; `postprocessed_out` is None on
            ranks other than 0. When hotstart is enabled, yields lag behind the
            frames actually processed by up to `hotstart_delay` frames.
        """
        # compile the model (it's a no-op if the model is already compiled)
        # note that it's intentionally added to `self.propagate_in_video`, so that the first
        # `self.add_prompt` call will be done in eager mode to fill in the decoder buffers
        # such as positional encoding cache)
        self._compile_model()

        processing_order, end_frame_idx = self._get_processing_order(
            inference_state,
            start_frame_idx,
            max_frame_num_to_track,
            reverse=reverse,
        )

        # Store max_frame_num_to_track in feature_cache for downstream methods
        inference_state["feature_cache"]["tracking_bounds"] = {
            "max_frame_num_to_track": max_frame_num_to_track,
            "propagate_in_video_start_frame_idx": start_frame_idx,
        }

        # Frames processed but not yet yielded (only used when hotstart_delay > 0).
        hotstart_buffer = []
        hotstart_removed_obj_ids = set()
        # when deciding whether to output a masklet on `yield_frame_idx`, we check whether the object is confirmed
        # in a future frame (`unconfirmed_frame_delay` frames after the current frame). For example, if we require
        # an object to be detected in 3 consecutive frames to be confirmed, then we look 2 frames in the future --
        # e.g., we output an object on frame 4 only if it becomes confirmed on frame 6.
        unconfirmed_status_delay = self.masklet_confirmation_consecutive_det_thresh - 1
        unconfirmed_obj_ids_per_frame = {}  # frame_idx -> hidden_obj_ids
        for frame_idx in tqdm(
            processing_order, desc="propagate_in_video", disable=self.rank > 0
        ):
            out = self._run_single_frame_inference(inference_state, frame_idx, reverse)

            if self.hotstart_delay > 0:
                # accumulate the outputs for the first `hotstart_delay` frames
                hotstart_buffer.append([frame_idx, out])
                # update the object IDs removed by hotstart so that we don't output them
                if self.rank == 0:
                    hotstart_removed_obj_ids.update(out["removed_obj_ids"])
                    unconfirmed_obj_ids = out.get("unconfirmed_obj_ids", None)
                    if unconfirmed_obj_ids is not None:
                        unconfirmed_obj_ids_per_frame[frame_idx] = unconfirmed_obj_ids

                if frame_idx == end_frame_idx:
                    # we reached the end of propagation -- yield all frames in the buffer
                    yield_list = hotstart_buffer
                    hotstart_buffer = []
                elif len(hotstart_buffer) >= self.hotstart_delay:
                    # we have enough frames -- yield and remove the first (oldest) frame from the buffer
                    yield_list = hotstart_buffer[:1]
                    hotstart_buffer = hotstart_buffer[1:]
                else:
                    # not enough frames yet -- skip yielding
                    yield_list = []
            else:
                yield_list = [(frame_idx, out)]  # output the current frame

            for yield_frame_idx, yield_out in yield_list:
                # post-process the output and yield it
                if self.rank == 0:
                    suppressed_obj_ids = yield_out["suppressed_obj_ids"]
                    # look ahead (or behind, when reversed) to the frame where this
                    # object's confirmation status was decided
                    unconfirmed_status_frame_idx = (
                        yield_frame_idx + unconfirmed_status_delay
                        if not reverse
                        else yield_frame_idx - unconfirmed_status_delay
                    )

                    # Clamp the frame index to stay within video bounds
                    num_frames = inference_state["num_frames"]
                    unconfirmed_status_frame_idx = max(
                        0, min(unconfirmed_status_frame_idx, num_frames - 1)
                    )

                    unconfirmed_obj_ids = unconfirmed_obj_ids_per_frame.get(
                        unconfirmed_status_frame_idx, None
                    )
                    postprocessed_out = self._postprocess_output(
                        inference_state,
                        yield_out,
                        hotstart_removed_obj_ids,
                        suppressed_obj_ids,
                        unconfirmed_obj_ids,
                    )

                    # refresh the cache with the final (filtered) per-object masks
                    self._cache_frame_outputs(
                        inference_state,
                        yield_frame_idx,
                        yield_out["obj_id_to_mask"],
                        suppressed_obj_ids=suppressed_obj_ids,
                        removed_obj_ids=hotstart_removed_obj_ids,
                        unconfirmed_obj_ids=unconfirmed_obj_ids,
                    )
                else:
                    postprocessed_out = None  # no output on other GPUs
                yield yield_frame_idx, postprocessed_out
356
+
357
    def _run_single_frame_inference(self, inference_state, frame_idx, reverse):
        """
        Perform inference on a single frame and get its inference results. This would
        also update `inference_state`.

        Returns a dict with "obj_id_to_mask", "obj_id_to_score",
        "obj_id_to_tracker_score", and (rank 0 only) "removed_obj_ids",
        "suppressed_obj_ids", "frame_stats", "unconfirmed_obj_ids".
        """
        # prepare inputs
        input_batch = inference_state["input_batch"]
        tracker_states_local = inference_state["tracker_inference_states"]
        has_text_prompt = inference_state["text_prompt"] is not None
        has_geometric_prompt = (
            inference_state["per_frame_geometric_prompt"][frame_idx] is not None
        )
        # run inference for the current frame
        (
            obj_id_to_mask,
            obj_id_to_score,
            tracker_states_local_new,
            tracker_metadata_new,
            frame_stats,
            _,
        ) = self._det_track_one_frame(
            frame_idx=frame_idx,
            num_frames=inference_state["num_frames"],
            reverse=reverse,
            input_batch=input_batch,
            geometric_prompt=(
                inference_state["constants"]["empty_geometric_prompt"]
                if not has_geometric_prompt
                else inference_state["per_frame_geometric_prompt"][frame_idx]
            ),
            tracker_states_local=tracker_states_local,
            tracker_metadata_prev=inference_state["tracker_metadata"],
            feature_cache=inference_state["feature_cache"],
            orig_vid_height=inference_state["orig_height"],
            orig_vid_width=inference_state["orig_width"],
            is_image_only=inference_state["is_image_only"],
            # new detections only make sense when some prompt is active
            allow_new_detections=has_text_prompt or has_geometric_prompt,
        )
        # update inference state
        inference_state["tracker_inference_states"] = tracker_states_local_new
        inference_state["tracker_metadata"] = tracker_metadata_new
        # use a dummy string in "previous_stages_out" to indicate this frame has outputs
        inference_state["previous_stages_out"][frame_idx] = "_THIS_FRAME_HAS_OUTPUTS_"

        if self.rank == 0:
            self._cache_frame_outputs(inference_state, frame_idx, obj_id_to_mask)

        out = {
            "obj_id_to_mask": obj_id_to_mask,
            "obj_id_to_score": obj_id_to_score,  # first frame detection score
            "obj_id_to_tracker_score": tracker_metadata_new[
                "obj_id_to_tracker_score_frame_wise"
            ][frame_idx],
        }
        # removed_obj_ids is only needed on rank 0 to handle hotstart delay buffer
        if self.rank == 0:
            rank0_metadata = tracker_metadata_new["rank0_metadata"]
            removed_obj_ids = rank0_metadata["removed_obj_ids"]
            out["removed_obj_ids"] = removed_obj_ids
            out["suppressed_obj_ids"] = rank0_metadata["suppressed_obj_ids"][frame_idx]
            out["frame_stats"] = frame_stats
            if self.masklet_confirmation_enable:
                # objects still awaiting enough consecutive detections to confirm
                status = rank0_metadata["masklet_confirmation"]["status"]
                is_unconfirmed = status == MaskletConfirmationStatus.UNCONFIRMED.value
                out["unconfirmed_obj_ids"] = tracker_metadata_new["obj_ids_all_gpu"][
                    is_unconfirmed
                ].tolist()
            else:
                out["unconfirmed_obj_ids"] = []

        return out
428
+
429
    def _postprocess_output(
        self,
        inference_state,
        out,
        removed_obj_ids=None,
        suppressed_obj_ids=None,
        unconfirmed_obj_ids=None,
    ):
        """Convert one frame's raw inference dict into numpy outputs.

        Filters empty masks and any object listed in removed / suppressed /
        unconfirmed IDs, computes normalized xywh boxes, and resolves mask
        overlaps object-wise. Returns a dict of numpy arrays keyed by
        "out_obj_ids", "out_probs", "out_boxes_xywh", "out_binary_masks",
        plus pass-through "frame_stats".
        """
        obj_id_to_mask = out["obj_id_to_mask"]  # low res masks
        curr_obj_ids = sorted(obj_id_to_mask.keys())
        H_video, W_video = inference_state["orig_height"], inference_state["orig_width"]
        if len(curr_obj_ids) == 0:
            # no objects: emit empty tensors directly (note: the filtering code
            # below is skipped entirely, so out_tracker_probs is never needed here)
            out_obj_ids = torch.zeros(0, dtype=torch.int64)
            out_probs = torch.zeros(0, dtype=torch.float32)
            out_binary_masks = torch.zeros(0, H_video, W_video, dtype=torch.bool)
            out_boxes_xywh = torch.zeros(0, 4, dtype=torch.float32)
        else:
            out_obj_ids = torch.tensor(curr_obj_ids, dtype=torch.int64)
            out_probs = torch.tensor(
                [out["obj_id_to_score"][obj_id] for obj_id in curr_obj_ids]
            )
            # tracker score defaults to 0.0 for objects without one
            out_tracker_probs = torch.tensor(
                [
                    (
                        out["obj_id_to_tracker_score"][obj_id]
                        if obj_id in out["obj_id_to_tracker_score"]
                        else 0.0
                    )
                    for obj_id in curr_obj_ids
                ]
            )
            out_binary_masks = torch.cat(
                [obj_id_to_mask[obj_id] for obj_id in curr_obj_ids], dim=0
            )

            assert out_binary_masks.dtype == torch.bool
            keep = out_binary_masks.any(dim=(1, 2)).cpu()  # remove masks with 0 areas
            # hide outputs for those object IDs in `obj_ids_to_hide`
            obj_ids_to_hide = []
            if suppressed_obj_ids is not None:
                obj_ids_to_hide.extend(suppressed_obj_ids)
            if removed_obj_ids is not None:
                obj_ids_to_hide.extend(removed_obj_ids)
            if unconfirmed_obj_ids is not None:
                obj_ids_to_hide.extend(unconfirmed_obj_ids)
            if len(obj_ids_to_hide) > 0:
                obj_ids_to_hide_t = torch.tensor(obj_ids_to_hide, dtype=torch.int64)
                keep &= ~torch.isin(out_obj_ids, obj_ids_to_hide_t)

            # slice those valid entries from the original outputs
            keep_idx = torch.nonzero(keep, as_tuple=True)[0]
            # pinned-memory async copy of the index tensor to the mask device
            keep_idx_gpu = keep_idx.pin_memory().to(
                device=out_binary_masks.device, non_blocking=True
            )

            out_obj_ids = torch.index_select(out_obj_ids, 0, keep_idx)
            out_probs = torch.index_select(out_probs, 0, keep_idx)
            out_tracker_probs = torch.index_select(out_tracker_probs, 0, keep_idx)
            out_binary_masks = torch.index_select(out_binary_masks, 0, keep_idx_gpu)

            if perflib.is_enabled:
                out_boxes_xyxy = perf_masks_to_boxes(
                    out_binary_masks, out_obj_ids.tolist()
                )
            else:
                out_boxes_xyxy = masks_to_boxes(out_binary_masks)

            out_boxes_xywh = box_xyxy_to_xywh(out_boxes_xyxy)  # convert to xywh format
            # normalize boxes to [0, 1] relative to the original video size
            out_boxes_xywh[..., 0] /= W_video
            out_boxes_xywh[..., 1] /= H_video
            out_boxes_xywh[..., 2] /= W_video
            out_boxes_xywh[..., 3] /= H_video

            # apply non-overlapping constraints on the existing masklets
            if out_binary_masks.shape[0] > 1:
                assert len(out_binary_masks) == len(out_tracker_probs)
                out_binary_masks = (
                    self.tracker._apply_object_wise_non_overlapping_constraints(
                        out_binary_masks.unsqueeze(1),
                        out_tracker_probs.unsqueeze(1).to(out_binary_masks.device),
                        background_value=0,
                    ).squeeze(1)
                ) > 0

        outputs = {
            "out_obj_ids": out_obj_ids.cpu().numpy(),
            "out_probs": out_probs.cpu().numpy(),
            "out_boxes_xywh": out_boxes_xywh.cpu().numpy(),
            "out_binary_masks": out_binary_masks.cpu().numpy(),
            "frame_stats": out.get("frame_stats", None),
        }
        return outputs
522
+
523
+ def _cache_frame_outputs(
524
+ self,
525
+ inference_state,
526
+ frame_idx,
527
+ obj_id_to_mask,
528
+ suppressed_obj_ids=None,
529
+ removed_obj_ids=None,
530
+ unconfirmed_obj_ids=None,
531
+ ):
532
+ # Filter out suppressed, removed, and unconfirmed objects from the cache
533
+ filtered_obj_id_to_mask = obj_id_to_mask.copy()
534
+
535
+ objects_to_exclude = set()
536
+ if suppressed_obj_ids is not None:
537
+ objects_to_exclude.update(suppressed_obj_ids)
538
+ if removed_obj_ids is not None:
539
+ objects_to_exclude.update(removed_obj_ids)
540
+ if unconfirmed_obj_ids is not None:
541
+ objects_to_exclude.update(unconfirmed_obj_ids)
542
+
543
+ if objects_to_exclude:
544
+ for obj_id in objects_to_exclude:
545
+ if obj_id in filtered_obj_id_to_mask:
546
+ del filtered_obj_id_to_mask[obj_id]
547
+
548
+ inference_state["cached_frame_outputs"][frame_idx] = filtered_obj_id_to_mask
549
+
550
+ def _build_tracker_output(
551
+ self, inference_state, frame_idx, refined_obj_id_to_mask=None
552
+ ):
553
+ assert (
554
+ "cached_frame_outputs" in inference_state
555
+ and frame_idx in inference_state["cached_frame_outputs"]
556
+ ), "No cached outputs found. Ensure normal propagation has run first to populate the cache."
557
+ cached_outputs = inference_state["cached_frame_outputs"][frame_idx]
558
+
559
+ obj_id_to_mask = cached_outputs.copy()
560
+
561
+ # Update with refined masks if provided
562
+ if refined_obj_id_to_mask is not None:
563
+ for obj_id, refined_mask in refined_obj_id_to_mask.items():
564
+ assert (
565
+ refined_mask is not None
566
+ ), f"Refined mask data must be provided for obj_id {obj_id}"
567
+ obj_id_to_mask[obj_id] = refined_mask
568
+
569
+ return obj_id_to_mask
570
+
571
    def _compile_model(self):
        """Compile the SAM model with torch.compile for speedup.

        Idempotent: a no-op when `self.compile_model` is False or compilation has
        already run (tracked via `self._model_is_compiled`).
        """
        is_compiled = getattr(self, "_model_is_compiled", False)
        if is_compiled or not self.compile_model:
            return

        import torch._dynamo

        # a larger cache size to hold varying number of shapes for torch.compile
        # see https://github.com/pytorch/pytorch/blob/v2.5.1/torch/_dynamo/config.py#L42-L49
        torch._dynamo.config.cache_size_limit = 128
        torch._dynamo.config.accumulated_cache_size_limit = 2048
        torch._dynamo.config.capture_scalar_outputs = True
        # fall back to eager on compile errors rather than crashing
        torch._dynamo.config.suppress_errors = True

        # Compile module components
        # skip compilation of `_encode_prompt` since it sometimes tiggger SymInt errors
        # self._encode_prompt = clone_output_wrapper(
        #     torch.compile(self._encode_prompt, fullgraph=True, mode="max-autotune")
        # )

        ## Compile SAM3 model components
        self.detector.backbone.vision_backbone.forward = clone_output_wrapper(
            torch.compile(
                self.detector.backbone.vision_backbone.forward,
                fullgraph=True,
                mode="max-autotune",
            )
        )
        self.detector.transformer.encoder.forward = clone_output_wrapper(
            torch.compile(
                self.detector.transformer.encoder.forward,
                fullgraph=True,
                mode="max-autotune",
            )
        )
        # decoder uses static shapes (dynamic=False)
        self.detector.transformer.decoder.forward = clone_output_wrapper(
            torch.compile(
                self.detector.transformer.decoder.forward,
                fullgraph=True,
                mode="max-autotune",
                dynamic=False,
            )
        )

        self.detector.segmentation_head.forward = clone_output_wrapper(
            torch.compile(
                self.detector.segmentation_head.forward,
                fullgraph=True,
                mode="max-autotune",
            )
        )

        ## Compile Tracker model components
        self.tracker.maskmem_backbone.forward = compile_wrapper(
            self.tracker.maskmem_backbone.forward,
            mode="max-autotune",
            fullgraph=True,
            dynamic=False,
        )

        # dynamic=True here: memory-attention input length varies with the number
        # of memory frames / object pointers; shape logging aids warm-up coverage
        self.tracker.transformer.encoder.forward = shape_logging_wrapper(
            compile_wrapper(
                self.tracker.transformer.encoder.forward,
                mode="max-autotune-no-cudagraphs",
                fullgraph=True,
                dynamic=True,
            ),
            keep_kwargs=["src", "src_pos", "prompt", "prompt_pos"],
        )

        self.tracker.sam_mask_decoder.forward = compile_wrapper(
            self.tracker.sam_mask_decoder.forward,
            mode="max-autotune",
            fullgraph=True,
            dynamic=False,  # Accuracy regression on True
        )

        self._model_is_compiled = True
650
+
651
    def _warm_up_vg_propagation(self, inference_state, start_frame_idx=0):
        """Warm up torch.compile caches by running propagation over a sweep of
        object counts and detection thresholds, then exercising the tracker
        memory encoder across its expected input shapes.

        Temporarily overrides `self.new_det_thresh`; restores it before returning.
        """
        # use different tracking score thresholds for each round to simulate different number of output objects
        num_objects_list = range(self.num_obj_for_compile + 1)
        new_det_score_thresh_list = [0.3, 0.5, 0.7]
        num_rounds = len(new_det_score_thresh_list)
        orig_new_det_thresh = self.new_det_thresh

        for i, thresh in enumerate(new_det_score_thresh_list):
            self.new_det_thresh = thresh
            for num_objects in num_objects_list:
                logger.info(f"{i+1}/{num_rounds} warming up model compilation")
                self.add_prompt(
                    inference_state, frame_idx=start_frame_idx, text_str="cat"
                )
                logger.info(
                    f"{i+1}/{num_rounds} warming up model compilation -- simulating {num_objects}/{self.num_obj_for_compile} objects"
                )
                inference_state = self.add_fake_objects_to_inference_state(
                    inference_state, num_objects, frame_idx=start_frame_idx
                )
                # reset confirmation bookkeeping to match the fake object count
                inference_state["tracker_metadata"]["rank0_metadata"].update(
                    {
                        "masklet_confirmation": {
                            "status": np.zeros(num_objects, dtype=np.int64),
                            "consecutive_det_num": np.zeros(
                                num_objects, dtype=np.int64
                            ),
                        }
                    }
                )
                # run forward and backward propagation to cover both directions
                for _ in self.propagate_in_video(
                    inference_state, start_frame_idx, reverse=False
                ):
                    pass
                for _ in self.propagate_in_video(
                    inference_state, start_frame_idx, reverse=True
                ):
                    pass
                self.reset_state(inference_state)
                logger.info(
                    f"{i+1}/{num_rounds} warming up model compilation -- completed round {i+1} out of {num_rounds}"
                )

        # Warm up Tracker memory encoder with varying input shapes
        num_iters = 3
        feat_size = self.tracker.sam_image_embedding_size**2  # 72 * 72 = 5184
        hidden_dim = self.tracker.hidden_dim  # 256
        mem_dim = self.tracker.mem_dim  # 64
        for _ in tqdm(range(num_iters)):
            # b: batch (object count), i: number of memory frames,
            # j: number of object-pointer sources
            for b in range(1, self.num_obj_for_compile + 1):
                for i in range(
                    1,
                    self.tracker.max_cond_frames_in_attn + self.tracker.num_maskmem,
                ):
                    for j in range(
                        self.tracker.max_cond_frames_in_attn
                        + self.tracker.max_obj_ptrs_in_encoder
                    ):
                        # each object pointer expands to hidden_dim // mem_dim tokens
                        num_obj_ptr_tokens = (hidden_dim // mem_dim) * j
                        src = torch.randn(feat_size, b, hidden_dim, device=self.device)
                        src_pos = torch.randn(
                            feat_size, b, hidden_dim, device=self.device
                        )
                        prompt = torch.randn(
                            feat_size * i + num_obj_ptr_tokens,
                            b,
                            mem_dim,
                            device=self.device,
                        )
                        prompt_pos = torch.randn(
                            feat_size * i + num_obj_ptr_tokens,
                            b,
                            mem_dim,
                            device=self.device,
                        )

                        self.tracker.transformer.encoder.forward(
                            src=src,
                            src_pos=src_pos,
                            prompt=prompt,
                            prompt_pos=prompt_pos,
                            num_obj_ptr_tokens=num_obj_ptr_tokens,
                        )

        self.new_det_thresh = orig_new_det_thresh
        return inference_state
737
+
738
    def add_fake_objects_to_inference_state(
        self, inference_state, num_objects, frame_idx
    ):
        """Inject `num_objects` synthetic all-ones objects into the tracker state
        (used by `_warm_up_vg_propagation` to exercise compiled shapes).

        Also fabricates the tracker metadata / cached outputs that downstream
        code expects, as if these objects had been detected for real.
        """
        new_det_obj_ids_local = np.arange(num_objects)
        high_res_H, high_res_W = (
            self.tracker.maskmem_backbone.mask_downsampler.interpol_size
        )
        # all-ones masks at the memory backbone's high-res input size
        new_det_masks = torch.ones(
            len(new_det_obj_ids_local), high_res_H, high_res_W
        ).to(self.device)

        inference_state["tracker_inference_states"] = self._tracker_add_new_objects(
            frame_idx=frame_idx,
            num_frames=inference_state["num_frames"],
            new_obj_ids=new_det_obj_ids_local,
            new_obj_masks=new_det_masks,
            tracker_states_local=inference_state["tracker_inference_states"],
            orig_vid_height=inference_state["orig_height"],
            orig_vid_width=inference_state["orig_width"],
            feature_cache=inference_state["feature_cache"],
        )

        # Synthesize obj_id_to_mask data for cached_frame_outputs to support _build_tracker_output during warmup
        obj_id_to_mask = {}
        if num_objects > 0:
            H_video = inference_state["orig_height"]
            W_video = inference_state["orig_width"]

            video_res_masks = F.interpolate(
                new_det_masks.unsqueeze(1),  # Add channel dimension for interpolation
                size=(H_video, W_video),
                mode="bilinear",
                align_corners=False,
            )  # (num_objects, 1, H_video, W_video)
            for i, obj_id in enumerate(new_det_obj_ids_local):
                obj_id_to_mask[obj_id] = (video_res_masks[i] > 0.0).to(torch.bool)
        if self.rank == 0:
            # populate the cache for every frame so warm-up can read any index
            for fidx in range(inference_state["num_frames"]):
                self._cache_frame_outputs(inference_state, fidx, obj_id_to_mask)

        # fabricate single-GPU tracker metadata consistent with `num_objects`
        inference_state["tracker_metadata"].update(
            {
                "obj_ids_per_gpu": [np.arange(num_objects)],
                "obj_ids_all_gpu": np.arange(num_objects),  # Same as 1 GPU
                "num_obj_per_gpu": [num_objects],
                "obj_id_to_score": {i: 1.0 for i in range(num_objects)},
                "max_obj_id": num_objects,
                "rank0_metadata": {
                    "masklet_confirmation": {
                        "status": np.zeros(num_objects, dtype=np.int64),
                        "consecutive_det_num": np.zeros(num_objects, dtype=np.int64),
                    },
                    "removed_obj_ids": set(),
                    "suppressed_obj_ids": defaultdict(set),
                },
            }
        )
        return inference_state
796
+
797
+ @torch.inference_mode()
798
+ @torch.autocast(device_type="cuda", dtype=torch.bfloat16)
799
+ def warm_up_compilation(self):
800
+ """
801
+ Warm up the model by running a dummy inference to compile the model. This is
802
+ useful to avoid the compilation overhead in the first inference call.
803
+ """
804
+ if not self.compile_model:
805
+ return
806
+ self._warm_up_complete = False
807
+ if self.device.type != "cuda":
808
+ raise RuntimeError(
809
+ f"The model must be on CUDA for warm-up compilation, got {self.device=}."
810
+ )
811
+
812
+ # temporally set to single GPU temporarily for warm-up compilation
813
+ orig_rank = self.rank
814
+ orig_world_size = self.world_size
815
+ self.rank = self.detector.rank = 0
816
+ self.world_size = self.detector.world_size = 1
817
+ orig_recondition_every_nth_frame = self.recondition_every_nth_frame
818
+ # self.recondition_every_nth_frame = 2
819
+
820
+ # Get a random video
821
+ inference_state = self.init_state(resource_path="<load-dummy-video-30>")
822
+ start_frame_idx = 0
823
+
824
+ # Run basic propagation warm-up
825
+ inference_state = self._warm_up_vg_propagation(inference_state, start_frame_idx)
826
+
827
+ logger.info("Warm-up compilation completed.")
828
+
829
+ # revert to the original GPU and rank
830
+ self.rank = self.detector.rank = orig_rank
831
+ self.world_size = self.detector.world_size = orig_world_size
832
+ self.recondition_every_nth_frame = orig_recondition_every_nth_frame
833
+ self._warm_up_complete = True
834
+ self.tracker.transformer.encoder.forward.set_logging(True)
835
+
836
    @torch.inference_mode()
    def add_prompt(
        self,
        inference_state,
        frame_idx,
        text_str=None,
        boxes_xywh=None,
        box_labels=None,
    ):
        """
        Add text, point or box prompts on a single frame. This method returns the inference
        outputs only on the prompted frame.

        Note that text prompts are NOT associated with a particular frame (i.e. they apply
        to all frames). However, we only run inference on the frame specified in `frame_idx`.

        Args:
            inference_state: the per-video state dict created by `init_state`.
            frame_idx: index of the frame to run inference on (0 <= frame_idx < num_frames).
            text_str: optional text prompt; the special value "visual" selects the
                visual-prompt path instead of a text prompt.
            boxes_xywh: optional (N, 4) boxes in normalized [xmin, ymin, w, h] of range 0~1.
            box_labels: per-box labels, required iff `boxes_xywh` is given.

        Returns:
            (frame_idx, postprocessed outputs for that frame).
        """
        logger.debug("Running add_prompt on frame %d", frame_idx)

        num_frames = inference_state["num_frames"]
        assert (
            text_str is not None or boxes_xywh is not None
        ), "at least one type of prompt (text, boxes) must be provided"
        assert (
            0 <= frame_idx < num_frames
        ), f"{frame_idx=} is out of range for a total of {num_frames} frames"

        # since it's a semantic prompt, we start over
        self.reset_state(inference_state)

        # 1) add text prompt
        if text_str is not None and text_str != "visual":
            inference_state["text_prompt"] = text_str
            inference_state["input_batch"].find_text_batch[0] = text_str
            text_id = self.TEXT_ID_FOR_TEXT
        else:
            # visual-prompt mode: no real text; a placeholder keeps the batch shape
            inference_state["text_prompt"] = None
            inference_state["input_batch"].find_text_batch[0] = "<text placeholder>"
            text_id = self.TEXT_ID_FOR_VISUAL
        # stamp the chosen text id onto every frame's inputs (text applies globally)
        for t in range(inference_state["num_frames"]):
            inference_state["input_batch"].find_inputs[t].text_ids[...] = text_id

        # 2) handle box prompt
        assert (boxes_xywh is not None) == (box_labels is not None)
        if boxes_xywh is not None:
            boxes_xywh = torch.as_tensor(boxes_xywh, dtype=torch.float32)
            box_labels = torch.as_tensor(box_labels, dtype=torch.long)
            # input boxes are expected to be [xmin, ymin, width, height] format
            # in normalized coordinates of range 0~1, similar to FA
            assert boxes_xywh.dim() == 2
            assert boxes_xywh.size(0) > 0 and boxes_xywh.size(-1) == 4
            assert box_labels.dim() == 1 and box_labels.size(0) == boxes_xywh.size(0)
            boxes_cxcywh = box_xywh_to_cxcywh(boxes_xywh)
            assert (boxes_xywh >= 0).all().item() and (boxes_xywh <= 1).all().item()
            assert (boxes_cxcywh >= 0).all().item() and (boxes_cxcywh <= 1).all().item()

            new_box_input = boxes_cxcywh, box_labels
            inference_state["per_frame_raw_box_input"][frame_idx] = new_box_input

            # handle the case of visual prompt (also added as an input box from the UI)
            boxes_cxcywh, box_labels, geometric_prompt = self._get_visual_prompt(
                inference_state, frame_idx, boxes_cxcywh, box_labels
            )

            inference_state["per_frame_geometric_prompt"][frame_idx] = geometric_prompt

        # Run detection/tracking on just the prompted frame and post-process.
        out = self._run_single_frame_inference(
            inference_state, frame_idx, reverse=False
        )
        return frame_idx, self._postprocess_output(inference_state, out)
905
+
906
    @torch.autocast(device_type="cuda", dtype=torch.bfloat16)
    def forward(self, input: BatchedDatapoint, is_inference: bool = False):
        """This method is only used for benchmark eval (not used in the demo).

        Runs every text prompt in the batch over the whole video and collects the
        resulting masklets, then packages them for the evaluator keyed by video id.

        NOTE(review): `is_inference` is not used in this body — presumably kept for
        trainer-interface compatibility; confirm before removing.
        """
        # set the model to single GPU for benchmark evaluation (to be compatible with trainer)
        orig_rank = self.rank
        orig_world_size = self.world_size
        self.rank = self.detector.rank = 0
        self.world_size = self.detector.world_size = 1

        # get data
        text_prompt_ids = input.find_metadatas[0].original_category_id
        text_prompt_list = input.find_text_batch

        # loop over txt prompts
        tracking_res = defaultdict(dict)  # frame_idx --> {obj_id: mask}
        scores_labels = defaultdict(tuple)  # obj_id --> (score, text_prompt_id)
        inference_state = self.init_state(resource_path=input.raw_images)
        for prompt_id, prompt in zip(text_prompt_ids, text_prompt_list):
            self.add_prompt(inference_state, frame_idx=0, text_str=prompt)
            # per-prompt object ids are local (start at 0); shift them by the max
            # global id assigned so far so ids stay unique across prompts
            start_obj_id = max(scores_labels.keys(), default=-1) + 1  # prev max + 1

            # propagate the prompts
            obj_ids_this_prompt = set()
            for frame_idx, out in self.propagate_in_video(
                inference_state,
                start_frame_idx=0,
                max_frame_num_to_track=inference_state["num_frames"],
                reverse=False,
            ):
                current_frame_res = tracking_res[frame_idx]
                for obj_id, mask in zip(out["out_obj_ids"], out["out_binary_masks"]):
                    mask_tensor = torch.tensor(mask[None], dtype=torch.bool)
                    current_frame_res[obj_id + start_obj_id] = mask_tensor
                # keys() also contains ids from earlier prompts, but those are all
                # < start_obj_id, so the shifted membership test below stays correct
                obj_ids_this_prompt.update(current_frame_res.keys())

            obj_id_to_score = inference_state["tracker_metadata"]["obj_id_to_score"]
            for obj_id, score in obj_id_to_score.items():
                if obj_id + start_obj_id in obj_ids_this_prompt:
                    score_tensor = torch.tensor(score, dtype=torch.float32)
                    scores_labels[obj_id + start_obj_id] = (score_tensor, prompt_id)

            # clear per-prompt state before the next text prompt
            self.reset_state(inference_state)

        video_id = input.find_metadatas[0].original_image_id[0].cpu().item()
        preds = self.prep_for_evaluator(input.raw_images, tracking_res, scores_labels)

        # revert the model to the original GPU and rank
        self.rank = self.detector.rank = orig_rank
        self.world_size = self.detector.world_size = orig_world_size
        return {video_id: preds}
956
+
957
+ def back_convert(self, targets):
958
+ # Needed for retraining compatibility with trainer
959
+ return targets
960
+
961
+
962
+ class Sam3VideoInferenceWithInstanceInteractivity(Sam3VideoInference):
963
+ def __init__(
964
+ self,
965
+ use_prev_mem_frame=False,
966
+ use_stateless_refinement=False,
967
+ refinement_detector_cond_frame_removal_window=16,
968
+ **kwargs,
969
+ ):
970
+ """
971
+ use_prev_mem_frame: bool, whether to condition on previous memory frames for adding points
972
+ use_stateless_refinement: bool, whether to enable stateless refinement behavior
973
+ refinement_detector_cond_frame_removal_window: int, we remove a detector conditioning frame if it
974
+ is within this many frames of a user refined frame. Set to a large value (e.g. 10000) to
975
+ always remove detector conditioning frames if there is any user refinement in the video.
976
+ """
977
+ super().__init__(**kwargs)
978
+ self.use_prev_mem_frame = use_prev_mem_frame
979
+ self.use_stateless_refinement = use_stateless_refinement
980
+ self.refinement_detector_cond_frame_removal_window = (
981
+ refinement_detector_cond_frame_removal_window
982
+ )
983
+
984
+ def _init_new_tracker_state(self, inference_state):
985
+ return self.tracker.init_state(
986
+ cached_features=inference_state["feature_cache"],
987
+ video_height=inference_state["orig_height"],
988
+ video_width=inference_state["orig_width"],
989
+ num_frames=inference_state["num_frames"],
990
+ )
991
+
992
    @torch.inference_mode()
    def propagate_in_video(
        self,
        inference_state,
        start_frame_idx=None,
        max_frame_num_to_track=None,
        reverse=False,
    ):
        """
        Propagate prompts/refinements through the video, yielding
        (frame_idx, postprocessed_output) per frame; non-zero ranks yield
        (frame_idx, None).

        The action history decides which of three modes runs:
          - "propagation_full": delegate to the parent's full VG propagation;
          - "propagation_partial": tracker-only propagation of the user
            added/refined objects, merged with existing cached VG predictions;
          - "propagation_fetch": just replay cached per-frame outputs.
        """
        # step 1: check which type of propagation to run, should be the same for all GPUs.
        propagation_type, obj_ids = self.parse_action_history_for_propagation(
            inference_state
        )
        # record this propagation itself so the next call can be classified
        self.add_action_history(
            inference_state,
            action_type=propagation_type,
            obj_ids=obj_ids,
            frame_idx=start_frame_idx,
        )

        # step 2: run full VG propagation
        if propagation_type == "propagation_full":
            logger.debug(f"Running full VG propagation (reverse={reverse}).")
            yield from super().propagate_in_video(
                inference_state,
                start_frame_idx=start_frame_idx,
                max_frame_num_to_track=max_frame_num_to_track,
                reverse=reverse,
            )
            return

        # step 3: run Tracker partial propagation or direct fetch existing predictions
        assert propagation_type in ["propagation_partial", "propagation_fetch"]
        logger.debug(
            f"Running Tracker propagation for objects {obj_ids} and merging it with existing VG predictions (reverse={reverse})."
            if propagation_type == "propagation_partial"
            else f"Fetching existing VG predictions without running any propagation (reverse={reverse})."
        )
        processing_order, _ = self._get_processing_order(
            inference_state,
            start_frame_idx=start_frame_idx,
            max_frame_num_to_track=max_frame_num_to_track,
            reverse=reverse,
        )

        tracker_metadata = inference_state["tracker_metadata"]

        # if fetch just return from output
        if propagation_type == "propagation_fetch":
            for frame_idx in tqdm(processing_order):
                if self.rank == 0:
                    obj_id_to_mask = inference_state["cached_frame_outputs"].get(
                        frame_idx, {}
                    )
                    # post processing - remove suppressed obj_ids
                    obj_id_to_score = tracker_metadata["obj_id_to_score"]
                    suppressed_obj_ids = tracker_metadata["rank0_metadata"][
                        "suppressed_obj_ids"
                    ][frame_idx]
                    obj_id_to_tracker_score = tracker_metadata[
                        "obj_id_to_tracker_score_frame_wise"
                    ][frame_idx]

                    out = {
                        "obj_id_to_mask": obj_id_to_mask,
                        "obj_id_to_score": obj_id_to_score,
                        "obj_id_to_tracker_score": obj_id_to_tracker_score,
                    }
                    yield (
                        frame_idx,
                        self._postprocess_output(
                            inference_state, out, suppressed_obj_ids=suppressed_obj_ids
                        ),
                    )
                else:
                    yield frame_idx, None

            return

        # get Tracker inference states containing selected obj_ids
        if propagation_type == "propagation_partial":
            # can be empty for GPUs where objects are not in their inference states
            tracker_states_local = self._get_tracker_inference_states_by_obj_ids(
                inference_state, obj_ids
            )
            for tracker_state in tracker_states_local:
                self.tracker.propagate_in_video_preflight(
                    tracker_state, run_mem_encoder=True
                )

        for frame_idx in tqdm(processing_order):
            # run Tracker propagation
            if propagation_type == "propagation_partial":
                self._prepare_backbone_feats(inference_state, frame_idx, reverse)
                obj_ids_local, low_res_masks_local, tracker_scores_local = (
                    self._propogate_tracker_one_frame_local_gpu(
                        tracker_states_local,
                        frame_idx=frame_idx,
                        reverse=reverse,
                        run_mem_encoder=True,
                    )
                )

            # broadcast refined object tracker scores and masks to all GPUs
            # handle multiple objects that can be located on different GPUs
            refined_obj_data = {}  # obj_id -> (score, mask_video_res)

            # Collect data for objects on this GPU
            local_obj_data = {}
            for obj_id in obj_ids:
                obj_rank = self._get_gpu_id_by_obj_id(inference_state, obj_id)
                if self.rank == obj_rank and obj_id in obj_ids_local:
                    refined_obj_idx = obj_ids_local.index(obj_id)
                    refined_mask_low_res = low_res_masks_local[
                        refined_obj_idx
                    ]  # (H_low_res, W_low_res)
                    refined_score = tracker_scores_local[refined_obj_idx]

                    # Keep low resolution for broadcasting to reduce communication cost
                    local_obj_data[obj_id] = (refined_score, refined_mask_low_res)

            # Broadcast data from each GPU that has refined objects
            if self.world_size > 1:
                for obj_id in obj_ids:
                    obj_rank = self._get_gpu_id_by_obj_id(inference_state, obj_id)
                    if self.rank == obj_rank:
                        # This GPU has the object, broadcast its data
                        # NOTE(review): data_to_broadcast can be None when the owning
                        # GPU's tracker did not output this obj on this frame, and the
                        # tuple construction below would then raise — confirm whether
                        # that case is reachable here.
                        data_to_broadcast = local_obj_data.get(obj_id, None)
                        data_list = [
                            (data_to_broadcast[0].cpu(), data_to_broadcast[1].cpu())
                        ]
                        self.broadcast_python_obj_cpu(data_list, src=obj_rank)
                        if data_to_broadcast is not None:
                            refined_obj_data[obj_id] = data_to_broadcast
                    elif self.rank != obj_rank:
                        # This GPU doesn't have the object, receive data
                        data_list = [None]
                        self.broadcast_python_obj_cpu(data_list, src=obj_rank)
                        refined_obj_data[obj_id] = (
                            data_list[0][0].to(self.device),
                            data_list[0][1].to(self.device),
                        )
            else:
                # Single GPU case
                refined_obj_data = local_obj_data

            # Update Tracker scores for all refined objects
            for obj_id, (refined_score, _) in refined_obj_data.items():
                tracker_metadata["obj_id_to_tracker_score_frame_wise"][
                    frame_idx
                ].update({obj_id: refined_score.item()})

            if self.rank == 0:
                # get predictions from Tracker inference states, it includes the original
                # VG predictions and the refined predictions from interactivity.

                # Prepare refined masks dictionary - upscale to video resolution after broadcast
                refined_obj_id_to_mask = {}
                for obj_id, (_, refined_mask_low_res) in refined_obj_data.items():
                    refined_mask_video_res = (
                        self._convert_low_res_mask_to_video_res(
                            refined_mask_low_res, inference_state
                        )
                    )  # (1, H_video, W_video) bool
                    refined_obj_id_to_mask[obj_id] = refined_mask_video_res

                obj_id_to_mask = self._build_tracker_output(
                    inference_state, frame_idx, refined_obj_id_to_mask
                )
                out = {
                    "obj_id_to_mask": obj_id_to_mask,
                    "obj_id_to_score": tracker_metadata["obj_id_to_score"],
                    "obj_id_to_tracker_score": tracker_metadata[
                        "obj_id_to_tracker_score_frame_wise"
                    ][frame_idx],
                }
                suppressed_obj_ids = tracker_metadata["rank0_metadata"][
                    "suppressed_obj_ids"
                ][frame_idx]
                self._cache_frame_outputs(
                    inference_state,
                    frame_idx,
                    obj_id_to_mask,
                    suppressed_obj_ids=suppressed_obj_ids,
                )
                # NOTE(review): this re-reads the same defaultdict entry as above;
                # redundant but harmless.
                suppressed_obj_ids = tracker_metadata["rank0_metadata"][
                    "suppressed_obj_ids"
                ][frame_idx]
                yield (
                    frame_idx,
                    self._postprocess_output(
                        inference_state, out, suppressed_obj_ids=suppressed_obj_ids
                    ),
                )
            else:
                yield frame_idx, None
1187
+
1188
+ def add_action_history(
1189
+ self, inference_state, action_type, frame_idx=None, obj_ids=None
1190
+ ):
1191
+ """
1192
+ action_history is used to automatically decide what to do during propagation.
1193
+ action_type: one of ["add", "remove", "refine"] + ["propagation_full", "propagation_partial", "propagation_fetch"]
1194
+ """
1195
+ instance_actions = ["add", "remove", "refine"]
1196
+ propagation_actions = [
1197
+ "propagation_full",
1198
+ "propagation_partial",
1199
+ "propagation_fetch",
1200
+ ]
1201
+ assert (
1202
+ action_type in instance_actions + propagation_actions
1203
+ ), f"Invalid action type: {action_type}, must be one of {instance_actions + propagation_actions}"
1204
+ action = {
1205
+ "type": action_type,
1206
+ "frame_idx": frame_idx,
1207
+ "obj_ids": obj_ids,
1208
+ }
1209
+ inference_state["action_history"].append(action)
1210
+
1211
+ def _has_object_been_refined(self, inference_state, obj_id):
1212
+ action_history = inference_state["action_history"]
1213
+ for action in action_history:
1214
+ if action["type"] in ["add", "refine"] and action.get("obj_ids"):
1215
+ if obj_id in action["obj_ids"]:
1216
+ return True
1217
+ return False
1218
+
1219
+ def parse_action_history_for_propagation(self, inference_state):
1220
+ """
1221
+ Parse the actions in history before the last propagation and prepare for the next propagation.
1222
+ We support multiple actions (add/remove/refine) between two propagations. If we had an action
1223
+ history similar to this ["propagate", "add", "refine", "remove", "add"], the next propagation
1224
+ would remove the removed object, and also propagate the two added/refined objects.
1225
+
1226
+ Returns:
1227
+ propagation_type: one of ["propagation_full", "propagation_partial", "propagation_fetch"]
1228
+ - "propagation_full": run VG propagation for all objects
1229
+ - "propagation_partial": run Tracker propagation for selected objects, useful for add/refine actions
1230
+ - "propagation_fetch": fetch existing VG predictions without running any propagation
1231
+ obj_ids: list of object ids to run Tracker propagation on if propagation_type is "propagation_partial".
1232
+ """
1233
+ action_history = inference_state["action_history"]
1234
+ if len(action_history) == 0:
1235
+ # we run propagation for the first time
1236
+ return "propagation_full", None
1237
+
1238
+ if "propagation" in action_history[-1]["type"]:
1239
+ if action_history[-1]["type"] in ["propagation_fetch"]:
1240
+ # last propagation is direct fetch, we fetch existing predictions
1241
+ return "propagation_fetch", None
1242
+ elif action_history[-1]["type"] in [
1243
+ "propagation_partial",
1244
+ "propagation_full",
1245
+ ]:
1246
+ # we do fetch prediction if we have already run propagation twice or we have run
1247
+ # propagation once and it is from the first frame or last frame.
1248
+ if (
1249
+ len(action_history) > 1
1250
+ and action_history[-2]["type"]
1251
+ in ["propagation_partial", "propagation_full"]
1252
+ ) or action_history[-1]["frame_idx"] in [
1253
+ 0,
1254
+ inference_state["num_frames"] - 1,
1255
+ ]:
1256
+ # we have run both forward and backward partial/full propagation
1257
+ return "propagation_fetch", None
1258
+ else:
1259
+ # we have run partial/full forward or backward propagation once, need run it for the rest of the frames
1260
+ return action_history[-1]["type"], action_history[-1]["obj_ids"]
1261
+
1262
+ # parse actions since last propagation
1263
+ obj_ids = []
1264
+ for action in action_history[::-1]:
1265
+ if "propagation" in action["type"]:
1266
+ # we reached the last propagation action, stop parsing
1267
+ break
1268
+ if action["type"] in ["add", "refine"]:
1269
+ obj_ids.extend(action["obj_ids"])
1270
+ # else action["type"] == "remove": noop
1271
+ obj_ids = list(set(obj_ids)) if len(obj_ids) > 0 else None
1272
+ propagation_type = (
1273
+ "propagation_partial" if obj_ids is not None else "propagation_fetch"
1274
+ )
1275
+ return propagation_type, obj_ids
1276
+
1277
    def remove_object(self, inference_state, obj_id, is_user_action=False):
        """
        We try to remove object from tracker states on every GPU, it will do nothing
        for states without this object.

        Args:
            inference_state: the per-video state dict.
            obj_id: id of the object to remove; must currently live on some GPU.
            is_user_action: when True, also record a "remove" action in the
                action history so the next propagation is classified correctly.
        """
        obj_rank = self._get_gpu_id_by_obj_id(inference_state, obj_id)
        assert obj_rank is not None, f"Object {obj_id} not found in any GPU."

        tracker_states_local = inference_state["tracker_inference_states"]
        # only the owning rank actually mutates its tracker states
        if self.rank == obj_rank:
            self._tracker_remove_object(tracker_states_local, obj_id)

        if is_user_action:
            self.add_action_history(
                inference_state, action_type="remove", obj_ids=[obj_id]
            )

        # update metadata
        tracker_metadata = inference_state["tracker_metadata"]
        # drop obj_id from the owning rank's id array (boolean-mask indexing)
        _obj_ids = tracker_metadata["obj_ids_per_gpu"][obj_rank]
        tracker_metadata["obj_ids_per_gpu"][obj_rank] = _obj_ids[_obj_ids != obj_id]
        tracker_metadata["num_obj_per_gpu"][obj_rank] = len(
            tracker_metadata["obj_ids_per_gpu"][obj_rank]
        )
        tracker_metadata["obj_ids_all_gpu"] = np.concatenate(
            tracker_metadata["obj_ids_per_gpu"]
        )
        tracker_metadata["obj_id_to_score"].pop(obj_id, None)
        # tracker_metadata["max_obj_id"] # we do not reuse the object id, so we do not update it here

        # Clean up cached frame outputs to remove references to the deleted object
        if "cached_frame_outputs" in inference_state:
            for frame_idx in inference_state["cached_frame_outputs"]:
                frame_cache = inference_state["cached_frame_outputs"][frame_idx]
                if obj_id in frame_cache:
                    del frame_cache[obj_id]
1313
+
1314
+ def _get_gpu_id_by_obj_id(self, inference_state, obj_id):
1315
+ """
1316
+ Locate GPU ID for a given object.
1317
+ """
1318
+ obj_ids_per_gpu = inference_state["tracker_metadata"]["obj_ids_per_gpu"]
1319
+ for rank, obj_ids in enumerate(obj_ids_per_gpu):
1320
+ if obj_id in obj_ids:
1321
+ return rank
1322
+ return None # object not found in any GPU
1323
+
1324
+ def _get_tracker_inference_states_by_obj_ids(self, inference_state, obj_ids):
1325
+ """
1326
+ Get the Tracker inference states that contain the given object ids.
1327
+ This is used to run partial Tracker propagation on a single object/bucket.
1328
+ Possibly multiple or zero states can be returned.
1329
+ """
1330
+ states = [
1331
+ state
1332
+ for state in inference_state["tracker_inference_states"]
1333
+ if set(obj_ids) & set(state["obj_ids"])
1334
+ ]
1335
+ return states
1336
+
1337
+ def _prepare_backbone_feats(self, inference_state, frame_idx, reverse):
1338
+ input_batch = inference_state["input_batch"]
1339
+ feature_cache = inference_state["feature_cache"]
1340
+ num_frames = inference_state["num_frames"]
1341
+ geometric_prompt = (
1342
+ inference_state["constants"]["empty_geometric_prompt"]
1343
+ if inference_state["per_frame_geometric_prompt"][frame_idx] is None
1344
+ else inference_state["per_frame_geometric_prompt"][frame_idx]
1345
+ )
1346
+ _ = self.run_backbone_and_detection(
1347
+ frame_idx=frame_idx,
1348
+ num_frames=num_frames,
1349
+ input_batch=input_batch,
1350
+ geometric_prompt=geometric_prompt,
1351
+ feature_cache=feature_cache,
1352
+ reverse=reverse,
1353
+ allow_new_detections=True,
1354
+ )
1355
+
1356
+ @torch.inference_mode()
1357
+ def add_prompt(
1358
+ self,
1359
+ inference_state,
1360
+ frame_idx,
1361
+ text_str=None,
1362
+ boxes_xywh=None,
1363
+ box_labels=None,
1364
+ points=None,
1365
+ point_labels=None,
1366
+ obj_id=None,
1367
+ rel_coordinates=True,
1368
+ ):
1369
+ if points is not None:
1370
+ # Tracker instance prompts
1371
+ assert (
1372
+ text_str is None and boxes_xywh is None
1373
+ ), "When points are provided, text_str and boxes_xywh must be None."
1374
+ assert (
1375
+ obj_id is not None
1376
+ ), "When points are provided, obj_id must be provided."
1377
+ return self.add_tracker_new_points(
1378
+ inference_state,
1379
+ frame_idx,
1380
+ obj_id=obj_id,
1381
+ points=points,
1382
+ labels=point_labels,
1383
+ rel_coordinates=rel_coordinates,
1384
+ use_prev_mem_frame=self.use_prev_mem_frame,
1385
+ )
1386
+ else:
1387
+ # SAM3 prompts
1388
+ return super().add_prompt(
1389
+ inference_state,
1390
+ frame_idx,
1391
+ text_str=text_str,
1392
+ boxes_xywh=boxes_xywh,
1393
+ box_labels=box_labels,
1394
+ )
1395
+
1396
+ @torch.inference_mode()
1397
+ def add_tracker_new_points(
1398
+ self,
1399
+ inference_state,
1400
+ frame_idx,
1401
+ obj_id,
1402
+ points,
1403
+ labels,
1404
+ rel_coordinates=True,
1405
+ use_prev_mem_frame=False,
1406
+ ):
1407
+ """Add a new point prompt to Tracker. Suppporting instance refinement to existing
1408
+ objects by passing existing obj_id or adding a new object by passing a new obj_id.
1409
+ use_prev_mem_frame=False to disable cross attention to previous memory frames.
1410
+ Every GPU returns the same results, and results should contain all masks including
1411
+ these masks not refined or not added by the current user points.
1412
+ """
1413
+ assert obj_id is not None, "obj_id must be provided to add new points"
1414
+ tracker_metadata = inference_state["tracker_metadata"]
1415
+ if tracker_metadata == {}:
1416
+ # initialize masklet metadata if it's uninitialized (empty dict)
1417
+ tracker_metadata.update(self._initialize_metadata())
1418
+
1419
+ obj_rank = self._get_gpu_id_by_obj_id(inference_state, obj_id)
1420
+
1421
+ # prepare feature
1422
+ self._prepare_backbone_feats(inference_state, frame_idx, reverse=False)
1423
+
1424
+ object_has_been_refined = self._has_object_been_refined(inference_state, obj_id)
1425
+ if (
1426
+ obj_rank is not None
1427
+ and self.use_stateless_refinement
1428
+ and not object_has_been_refined
1429
+ ):
1430
+ # The first time we start refinement on the object, we remove it.
1431
+ logger.debug(
1432
+ f"[rank={self.rank}] Removing object {obj_id} before refinement."
1433
+ )
1434
+ self.remove_object(inference_state, obj_id, is_user_action=False)
1435
+ obj_rank = None
1436
+
1437
+ if obj_rank is None:
1438
+ # new object, we assign it a GPU and create a new inference state if limit allows
1439
+ num_prev_obj = np.sum(tracker_metadata["num_obj_per_gpu"])
1440
+ if num_prev_obj >= self.max_num_objects:
1441
+ logger.warning(
1442
+ f"add_tracker_new_points: cannot add a new object as we are already tracking {num_prev_obj=} "
1443
+ f"masklets (under {self.max_num_objects=})"
1444
+ )
1445
+ obj_ids = []
1446
+ H_low_res = W_low_res = self.tracker.low_res_mask_size
1447
+ H_video_res = inference_state["orig_height"]
1448
+ W_video_res = inference_state["orig_width"]
1449
+ low_res_masks = torch.zeros(0, 1, H_low_res, W_low_res)
1450
+ video_res_masks = torch.zeros(0, 1, H_video_res, W_video_res)
1451
+ return frame_idx, obj_ids, low_res_masks, video_res_masks
1452
+
1453
+ new_det_gpu_ids = self._assign_new_det_to_gpus(
1454
+ new_det_num=1,
1455
+ prev_workload_per_gpu=tracker_metadata["num_obj_per_gpu"],
1456
+ )
1457
+ obj_rank = new_det_gpu_ids[0]
1458
+
1459
+ # get tracker inference state for the new object
1460
+ if self.rank == obj_rank:
1461
+ # for batched inference, we create a new inference state
1462
+ tracker_state = self._init_new_tracker_state(inference_state)
1463
+ inference_state["tracker_inference_states"].append(tracker_state)
1464
+
1465
+ # update metadata
1466
+ tracker_metadata["obj_ids_per_gpu"][obj_rank] = np.concatenate(
1467
+ [
1468
+ tracker_metadata["obj_ids_per_gpu"][obj_rank],
1469
+ np.array([obj_id], dtype=np.int64),
1470
+ ]
1471
+ )
1472
+ tracker_metadata["num_obj_per_gpu"][obj_rank] = len(
1473
+ tracker_metadata["obj_ids_per_gpu"][obj_rank]
1474
+ )
1475
+ tracker_metadata["obj_ids_all_gpu"] = np.concatenate(
1476
+ tracker_metadata["obj_ids_per_gpu"]
1477
+ )
1478
+ tracker_metadata["max_obj_id"] = max(tracker_metadata["max_obj_id"], obj_id)
1479
+
1480
+ logger.debug(
1481
+ f"[rank={self.rank}] Adding new object with id {obj_id} at frame {frame_idx}."
1482
+ )
1483
+ self.add_action_history(
1484
+ inference_state, "add", frame_idx=frame_idx, obj_ids=[obj_id]
1485
+ )
1486
+ else:
1487
+ # existing object, for refinement
1488
+ if self.rank == obj_rank:
1489
+ tracker_states = self._get_tracker_inference_states_by_obj_ids(
1490
+ inference_state, [obj_id]
1491
+ )
1492
+ assert (
1493
+ len(tracker_states) == 1
1494
+ ), f"[rank={self.rank}] Multiple Tracker inference states found for the same object id."
1495
+ tracker_state = tracker_states[0]
1496
+
1497
+ # log
1498
+ logger.debug(
1499
+ f"[rank={self.rank}] Refining existing object with id {obj_id} at frame {frame_idx}."
1500
+ )
1501
+ self.add_action_history(
1502
+ inference_state, "refine", frame_idx=frame_idx, obj_ids=[obj_id]
1503
+ )
1504
+
1505
+ # assign higher score to added/refined object
1506
+ tracker_metadata["obj_id_to_score"][obj_id] = 1.0
1507
+ tracker_metadata["obj_id_to_tracker_score_frame_wise"][frame_idx][obj_id] = 1.0
1508
+
1509
+ if self.rank == 0:
1510
+ rank0_metadata = tracker_metadata.get("rank0_metadata", {})
1511
+
1512
+ if "removed_obj_ids" in rank0_metadata:
1513
+ rank0_metadata["removed_obj_ids"].discard(obj_id)
1514
+
1515
+ if "suppressed_obj_ids" in rank0_metadata:
1516
+ for frame_id in rank0_metadata["suppressed_obj_ids"]:
1517
+ rank0_metadata["suppressed_obj_ids"][frame_id].discard(obj_id)
1518
+
1519
+ if "masklet_confirmation" in rank0_metadata:
1520
+ obj_ids_all_gpu = tracker_metadata["obj_ids_all_gpu"]
1521
+ obj_indices = np.where(obj_ids_all_gpu == obj_id)[0]
1522
+ if len(obj_indices) > 0:
1523
+ obj_idx = obj_indices[0]
1524
+ if obj_idx < len(rank0_metadata["masklet_confirmation"]["status"]):
1525
+ rank0_metadata["masklet_confirmation"]["status"][obj_idx] = 1
1526
+ rank0_metadata["masklet_confirmation"]["consecutive_det_num"][
1527
+ obj_idx
1528
+ ] = self.masklet_confirmation_consecutive_det_thresh
1529
+
1530
+ if self.rank == obj_rank:
1531
+ frame_idx, obj_ids, low_res_masks, video_res_masks = (
1532
+ self.tracker.add_new_points(
1533
+ inference_state=tracker_state,
1534
+ frame_idx=frame_idx,
1535
+ obj_id=obj_id,
1536
+ points=points,
1537
+ labels=labels,
1538
+ clear_old_points=True,
1539
+ rel_coordinates=rel_coordinates,
1540
+ use_prev_mem_frame=use_prev_mem_frame,
1541
+ )
1542
+ )
1543
+
1544
+ if video_res_masks is not None and len(video_res_masks) > 0:
1545
+ video_res_masks = fill_holes_in_mask_scores(
1546
+ video_res_masks, # shape (N, 1, H_video, W_video)
1547
+ max_area=self.fill_hole_area,
1548
+ fill_holes=True,
1549
+ remove_sprinkles=True,
1550
+ )
1551
+
1552
+ # Since the mem encoder has already run for the current input points?
1553
+ self.tracker.propagate_in_video_preflight(
1554
+ tracker_state, run_mem_encoder=True
1555
+ )
1556
+ # Clear detector conditioning frames when user clicks are received to allow
1557
+ # model updating masks on these frames. It is a noop if user is refining on the
1558
+ # detector conditioning frames or adding new objects.
1559
+ self.clear_detector_added_cond_frame_in_tracker(
1560
+ tracker_state, obj_id, frame_idx
1561
+ )
1562
+
1563
+ # fetch results from states and gather across GPUs
1564
+ # Use optimized caching approach to avoid reprocessing unmodified objects
1565
+ if self.rank == obj_rank and len(obj_ids) > 0:
1566
+ new_mask_data = (video_res_masks[obj_ids.index(obj_id)] > 0.0).to(
1567
+ torch.bool
1568
+ )
1569
+ else:
1570
+ new_mask_data = None
1571
+ # Broadcast the new mask data across all ranks for consistency
1572
+ if self.world_size > 1:
1573
+ data_list = [new_mask_data.cpu() if new_mask_data is not None else None]
1574
+ self.broadcast_python_obj_cpu(data_list, src=obj_rank)
1575
+ new_mask_data = data_list[0].to(self.device)
1576
+
1577
+ if self.rank == 0:
1578
+ obj_id_to_mask = self._build_tracker_output(
1579
+ inference_state,
1580
+ frame_idx,
1581
+ {obj_id: new_mask_data} if new_mask_data is not None else None,
1582
+ )
1583
+ # post processing - remove suppressed obj_ids
1584
+ obj_id_to_score = tracker_metadata["obj_id_to_score"]
1585
+ suppressed_obj_ids = tracker_metadata["rank0_metadata"][
1586
+ "suppressed_obj_ids"
1587
+ ][frame_idx]
1588
+ obj_id_to_tracker_score = tracker_metadata[
1589
+ "obj_id_to_tracker_score_frame_wise"
1590
+ ][frame_idx]
1591
+
1592
+ out = {
1593
+ "obj_id_to_mask": obj_id_to_mask,
1594
+ "obj_id_to_score": obj_id_to_score,
1595
+ "obj_id_to_tracker_score": obj_id_to_tracker_score,
1596
+ }
1597
+ self._cache_frame_outputs(
1598
+ inference_state,
1599
+ frame_idx,
1600
+ obj_id_to_mask,
1601
+ suppressed_obj_ids=suppressed_obj_ids,
1602
+ )
1603
+ return frame_idx, self._postprocess_output(
1604
+ inference_state, out, suppressed_obj_ids=suppressed_obj_ids
1605
+ )
1606
+ else:
1607
+ return frame_idx, None # no output on other GPUs
1608
+
1609
    def _gather_obj_id_to_mask_across_gpus(self, inference_state, obj_id_to_mask_local):
        """Gather per-object low-res masks from all GPUs into one global tensor.

        Each rank owns a fixed subset of object ids (``obj_ids_per_gpu``).
        For every locally-owned object a (H, W) low-res mask is stacked; objects
        without an output on this frame get a constant -1024.0 placeholder
        (a very negative logit, i.e. an empty mask after thresholding).
        The per-rank stacks are then all-gathered and concatenated in rank order.

        Args:
            inference_state: Shared inference state holding ``tracker_metadata``.
            obj_id_to_mask_local: Dict mapping locally-owned obj_id -> (H, W)
                low-res mask tensor.

        Returns:
            Tensor of shape (num_global_obj, H_mask, W_mask) with all ranks'
            masks concatenated in ``obj_ids_per_gpu`` order.
        """
        tracker_metadata = inference_state["tracker_metadata"]

        # concatenate the output masklets from all local inference states
        H_mask = W_mask = self.tracker.low_res_mask_size
        obj_ids_local = tracker_metadata["obj_ids_per_gpu"][self.rank]
        low_res_masks_local = []
        for obj_id in obj_ids_local:
            if obj_id in obj_id_to_mask_local:
                low_res_masks_local.append(obj_id_to_mask_local[obj_id])
            else:
                # Placeholder for objects with no output this frame: a large
                # negative value decodes to an all-empty mask downstream.
                low_res_masks_local.append(
                    torch.full((H_mask, W_mask), -1024.0, device=self.device)
                )
        if len(low_res_masks_local) > 0:
            low_res_masks_local = torch.stack(low_res_masks_local, dim=0)  # (N, H, W)
            assert low_res_masks_local.shape[1:] == (H_mask, W_mask)
        else:
            # This rank owns no objects; contribute an empty (0, H, W) tensor.
            low_res_masks_local = torch.zeros(0, H_mask, W_mask, device=self.device)

        # all-gather `low_res_masks_local` into `low_res_masks_global`
        # - low_res_masks_global: Tensor -- (num_global_obj, H_mask, W_mask)
        if self.world_size > 1:
            # all_gather requires same-dtype contiguous buffers on every rank.
            low_res_masks_local = low_res_masks_local.float().contiguous()
            low_res_masks_peers = [
                low_res_masks_local.new_empty(num_obj, H_mask, W_mask)
                for num_obj in tracker_metadata["num_obj_per_gpu"]
            ]
            dist.all_gather(low_res_masks_peers, low_res_masks_local)
            low_res_masks_global = torch.cat(low_res_masks_peers, dim=0)
        else:
            low_res_masks_global = low_res_masks_local
        return low_res_masks_global
1643
+
1644
+ def _convert_low_res_mask_to_video_res(self, low_res_mask, inference_state):
1645
+ """
1646
+ Convert a low-res mask to video resolution, matching the format expected by _build_tracker_output.
1647
+
1648
+ Args:
1649
+ low_res_mask: Tensor of shape (H_low_res, W_low_res)
1650
+ inference_state: Contains video dimensions
1651
+
1652
+ Returns:
1653
+ video_res_mask: Tensor of shape (1, H_video, W_video) bool
1654
+ """
1655
+ if low_res_mask is None:
1656
+ return None
1657
+
1658
+ # Convert to 3D for interpolation: (H_low_res, W_low_res) -> (1, H_low_res, W_low_res)
1659
+ low_res_mask_3d = low_res_mask.unsqueeze(0).unsqueeze(0)
1660
+
1661
+ # Get video dimensions
1662
+ H_video = inference_state["orig_height"]
1663
+ W_video = inference_state["orig_width"]
1664
+
1665
+ video_res_mask = F.interpolate(
1666
+ low_res_mask_3d.float(),
1667
+ size=(H_video, W_video),
1668
+ mode="bilinear",
1669
+ align_corners=False,
1670
+ ) # (1, H_video, W_video)
1671
+
1672
+ # Convert to boolean - already in the right shape!
1673
+ return (video_res_mask.squeeze(0) > 0.0).to(torch.bool)
1674
+
1675
    def clear_detector_added_cond_frame_in_tracker(
        self, tracker_state, obj_id, refined_frame_idx
    ):
        """Clear detector-added conditioning frames near a user-refined frame.

        Detector-added conditioning frames carry mask inputs but no point
        inputs; clearing the ones within a predefined window of the refined
        frame allows the model to update masks on those frames. This is a noop
        if the user is refining on the detector conditioning frames themselves
        or adding new objects.

        Args:
            tracker_state: Per-rank tracker inference state.
            obj_id: Object whose mask-only conditioning frames are inspected.
            refined_frame_idx: Frame index the user just refined.
        """
        obj_idx = self.tracker._obj_id_to_idx(tracker_state, obj_id)

        mask_only_cond_frame_indices = []
        window = self.refinement_detector_cond_frame_removal_window
        for frame_idx in tracker_state["mask_inputs_per_obj"][obj_idx]:
            # Mask input without point input => the frame was conditioned by
            # the detector, not by user clicks.
            if frame_idx not in tracker_state["point_inputs_per_obj"][obj_idx]:
                # clear conditioning frames within a window of the refined frame
                if abs(frame_idx - refined_frame_idx) <= window:
                    mask_only_cond_frame_indices.append(frame_idx)

        # clear
        if len(mask_only_cond_frame_indices) > 0:
            for frame_idx in mask_only_cond_frame_indices:
                # obj_ids_on_this_frame is essentially all obj_ids in the state
                # since they are bucket batched
                obj_ids_on_this_frame = tracker_state["obj_id_to_idx"].keys()
                for obj_id2 in obj_ids_on_this_frame:
                    self.tracker.clear_all_points_in_frame(
                        tracker_state, frame_idx, obj_id2, need_output=False
                    )
            logger.debug(
                f"Cleared detector mask only conditioning frames ({mask_only_cond_frame_indices}) in Tracker."
            )
        return
1704
+
1705
+
1706
def is_image_type(resource_path: str) -> bool:
    """Return True when *resource_path* refers to a single image.

    A list input is treated as a sequence of frame paths and counts as an
    image only when it contains exactly one entry; a string input is checked
    against the known image file extensions.
    """
    if isinstance(resource_path, list):
        return len(resource_path) == 1
    image_suffixes = tuple(IMAGE_EXTS)
    return resource_path.lower().endswith(image_suffixes)
source_code/sam3/sam3/model_builder.py ADDED
@@ -0,0 +1,793 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
2
+
3
+ import os
4
+ from typing import Optional
5
+
6
+ import torch
7
+ import torch.nn as nn
8
+ from huggingface_hub import hf_hub_download
9
+ from iopath.common.file_io import g_pathmgr
10
+ from sam3.model.decoder import (
11
+ TransformerDecoder,
12
+ TransformerDecoderLayer,
13
+ TransformerDecoderLayerv2,
14
+ TransformerEncoderCrossAttention,
15
+ )
16
+ from sam3.model.encoder import TransformerEncoderFusion, TransformerEncoderLayer
17
+ from sam3.model.geometry_encoders import SequenceGeometryEncoder
18
+ from sam3.model.maskformer_segmentation import PixelDecoder, UniversalSegmentationHead
19
+ from sam3.model.memory import (
20
+ CXBlock,
21
+ SimpleFuser,
22
+ SimpleMaskDownSampler,
23
+ SimpleMaskEncoder,
24
+ )
25
+ from sam3.model.model_misc import (
26
+ DotProductScoring,
27
+ MLP,
28
+ MultiheadAttentionWrapper as MultiheadAttention,
29
+ TransformerWrapper,
30
+ )
31
+ from sam3.model.necks import Sam3DualViTDetNeck
32
+ from sam3.model.position_encoding import PositionEmbeddingSine
33
+ from sam3.model.sam1_task_predictor import SAM3InteractiveImagePredictor
34
+ from sam3.model.sam3_image import Sam3Image, Sam3ImageOnVideoMultiGPU
35
+ from sam3.model.sam3_tracking_predictor import Sam3TrackerPredictor
36
+ from sam3.model.sam3_video_inference import Sam3VideoInferenceWithInstanceInteractivity
37
+ from sam3.model.sam3_video_predictor import Sam3VideoPredictorMultiGPU
38
+ from sam3.model.text_encoder_ve import VETextEncoder
39
+ from sam3.model.tokenizer_ve import SimpleTokenizer
40
+ from sam3.model.vitdet import ViT
41
+ from sam3.model.vl_combiner import SAM3VLBackbone
42
+ from sam3.sam.transformer import RoPEAttention
43
+
44
+
45
+ # Setup TensorFloat-32 for Ampere GPUs if available
46
+ def _setup_tf32() -> None:
47
+ """Enable TensorFloat-32 for Ampere GPUs if available."""
48
+ if torch.cuda.is_available():
49
+ device_props = torch.cuda.get_device_properties(0)
50
+ if device_props.major >= 8:
51
+ torch.backends.cuda.matmul.allow_tf32 = True
52
+ torch.backends.cudnn.allow_tf32 = True
53
+
54
+
55
+ _setup_tf32()
56
+
57
+
58
def _create_position_encoding(precompute_resolution=None):
    """Create the sine position encoding used by the visual backbone.

    Args:
        precompute_resolution: Optional resolution for which the encoding is
            precomputed; None leaves it computed on demand.
    """
    pos_enc_kwargs = {
        "num_pos_feats": 256,
        "normalize": True,
        "scale": None,
        "temperature": 10000,
        "precompute_resolution": precompute_resolution,
    }
    return PositionEmbeddingSine(**pos_enc_kwargs)
67
+
68
+
69
def _create_vit_backbone(compile_mode=None):
    """Create ViT backbone for visual feature extraction.

    Args:
        compile_mode: Optional torch.compile mode forwarded to the ViT
            (e.g. "default"); None disables compilation.

    Returns:
        ViT: a large ViT trunk configured for 1008x1008 inputs with
        windowed attention plus four global-attention blocks.
    """
    return ViT(
        img_size=1008,
        pretrain_img_size=336,  # pretrained at 336px, adapted to 1008px inputs
        patch_size=14,
        embed_dim=1024,
        depth=32,
        num_heads=16,
        mlp_ratio=4.625,
        norm_layer="LayerNorm",
        drop_path_rate=0.1,
        qkv_bias=True,
        use_abs_pos=True,
        tile_abs_pos=True,
        global_att_blocks=(7, 15, 23, 31),  # global attention every 8th block
        rel_pos_blocks=(),
        use_rope=True,
        use_interp_rope=True,
        window_size=24,
        pretrain_use_cls_token=True,
        retain_cls_token=False,
        ln_pre=True,
        ln_post=False,
        return_interm_layers=False,
        bias_patch_embed=False,
        compile_mode=compile_mode,
    )
97
+
98
+
99
def _create_vit_neck(position_encoding, vit_backbone, enable_inst_interactivity=False):
    """Create the feature-pyramid neck on top of the ViT trunk.

    The extra SAM2-style neck is attached only when instance interactivity
    (the SAM 1 point/box task) is enabled.
    """
    neck_kwargs = {
        "position_encoding": position_encoding,
        "d_model": 256,
        "scale_factors": [4.0, 2.0, 1.0, 0.5],
        "trunk": vit_backbone,
        "add_sam2_neck": enable_inst_interactivity,
    }
    return Sam3DualViTDetNeck(**neck_kwargs)
108
+
109
+
110
def _create_vl_backbone(vit_neck, text_encoder):
    """Combine the visual neck and the text encoder into one VL backbone."""
    combined = SAM3VLBackbone(scalp=1, visual=vit_neck, text=text_encoder)
    return combined
113
+
114
+
115
def _create_transformer_encoder() -> TransformerEncoderFusion:
    """Create transformer encoder with its layer.

    Builds a 6-layer fusion encoder whose layers self-attend over image
    tokens and cross-attend to text tokens (early vision-language fusion).

    Returns:
        TransformerEncoderFusion: the configured fusion encoder.
    """
    encoder_layer = TransformerEncoderLayer(
        activation="relu",
        d_model=256,
        dim_feedforward=2048,
        dropout=0.1,
        pos_enc_at_attn=True,
        pos_enc_at_cross_attn_keys=False,
        pos_enc_at_cross_attn_queries=False,
        pre_norm=True,
        self_attention=MultiheadAttention(
            num_heads=8,
            dropout=0.1,
            embed_dim=256,
            batch_first=True,
        ),
        cross_attention=MultiheadAttention(
            num_heads=8,
            dropout=0.1,
            embed_dim=256,
            batch_first=True,
        ),
    )

    encoder = TransformerEncoderFusion(
        layer=encoder_layer,
        num_layers=6,
        d_model=256,
        num_feature_levels=1,
        frozen=False,
        use_act_checkpoint=True,  # activation checkpointing to save memory
        add_pooled_text_to_img_feat=False,
        pool_text_with_mask=True,
    )
    return encoder
151
+
152
+
153
def _create_transformer_decoder() -> TransformerDecoder:
    """Create transformer decoder with its layer.

    Builds a 6-layer decoder with 200 object queries, iterative box
    refinement, and a presence token.

    Returns:
        TransformerDecoder: the configured decoder.
    """
    decoder_layer = TransformerDecoderLayer(
        activation="relu",
        d_model=256,
        dim_feedforward=2048,
        dropout=0.1,
        cross_attention=MultiheadAttention(
            num_heads=8,
            dropout=0.1,
            embed_dim=256,
        ),
        n_heads=8,
        use_text_cross_attention=True,
    )

    decoder = TransformerDecoder(
        layer=decoder_layer,
        num_layers=6,
        num_queries=200,
        return_intermediate=True,
        box_refine=True,  # iterative box refinement across decoder layers
        num_o2m_queries=0,
        dac=True,
        boxRPB="log",  # log-scaled relative position bias over boxes
        d_model=256,
        frozen=False,
        interaction_layer=None,
        dac_use_selfatt_ln=True,
        resolution=1008,
        stride=14,
        use_act_checkpoint=True,
        presence_token=True,
    )
    return decoder
188
+
189
+
190
def _create_dot_product_scoring():
    """Create the dot-product scoring module with its prompt-projection MLP."""
    mlp_cfg = {
        "input_dim": 256,
        "hidden_dim": 2048,
        "output_dim": 256,
        "num_layers": 2,
        "dropout": 0.1,
        "residual": True,
        "out_norm": nn.LayerNorm(256),
    }
    return DotProductScoring(d_model=256, d_proj=256, prompt_mlp=MLP(**mlp_cfg))
202
+
203
+
204
def _create_segmentation_head(compile_mode=None):
    """Create segmentation head with pixel decoder.

    Args:
        compile_mode: Optional torch.compile mode forwarded to the pixel
            decoder; None disables compilation.

    Returns:
        UniversalSegmentationHead: head producing masks via a 3-stage
        upsampling pixel decoder with prompt cross-attention.
    """
    pixel_decoder = PixelDecoder(
        num_upsampling_stages=3,
        interpolation_mode="nearest",
        hidden_dim=256,
        compile_mode=compile_mode,
    )

    cross_attend_prompt = MultiheadAttention(
        num_heads=8,
        dropout=0,
        embed_dim=256,
    )

    segmentation_head = UniversalSegmentationHead(
        hidden_dim=256,
        upsampling_stages=3,
        aux_masks=False,
        presence_head=False,
        dot_product_scorer=None,
        act_ckpt=True,  # activation checkpointing to save memory
        cross_attend_prompt=cross_attend_prompt,
        pixel_decoder=pixel_decoder,
    )
    return segmentation_head
230
+
231
+
232
def _create_geometry_encoder():
    """Create geometry encoder with all its components.

    Returns:
        SequenceGeometryEncoder: 3-layer encoder embedding point/box prompts.

    NOTE(review): `cx_block` below is constructed but never used in this
    function — confirm whether it is dead code or was meant to be passed to
    the geometry encoder.
    """
    # Create position encoding for geometry encoder
    geo_pos_enc = _create_position_encoding()
    # Create CX block for fuser
    cx_block = CXBlock(
        dim=256,
        kernel_size=7,
        padding=3,
        layer_scale_init_value=1.0e-06,
        use_dwconv=True,
    )
    # Create geometry encoder layer
    geo_layer = TransformerEncoderLayer(
        activation="relu",
        d_model=256,
        dim_feedforward=2048,
        dropout=0.1,
        pos_enc_at_attn=False,
        pre_norm=True,
        self_attention=MultiheadAttention(
            num_heads=8,
            dropout=0.1,
            embed_dim=256,
            batch_first=False,
        ),
        pos_enc_at_cross_attn_queries=False,
        pos_enc_at_cross_attn_keys=True,
        cross_attention=MultiheadAttention(
            num_heads=8,
            dropout=0.1,
            embed_dim=256,
            batch_first=False,
        ),
    )

    # Create geometry encoder
    input_geometry_encoder = SequenceGeometryEncoder(
        pos_enc=geo_pos_enc,
        encode_boxes_as_points=False,
        points_direct_project=True,
        points_pool=True,
        points_pos_enc=True,
        boxes_direct_project=True,
        boxes_pool=True,
        boxes_pos_enc=True,
        d_model=256,
        num_layers=3,
        layer=geo_layer,
        use_act_ckpt=True,
        add_cls=True,
        add_post_encode_proj=True,
    )
    return input_geometry_encoder
286
+
287
+
288
def _create_sam3_model(
    backbone,
    transformer,
    input_geometry_encoder,
    segmentation_head,
    dot_prod_scoring,
    inst_interactive_predictor,
    eval_mode,
):
    """Assemble the SAM3 image model from prebuilt components.

    In training mode (eval_mode=False) a Hungarian matcher is attached for
    target assignment; in eval mode no matcher is passed.
    """
    model_kwargs = dict(
        backbone=backbone,
        transformer=transformer,
        input_geometry_encoder=input_geometry_encoder,
        segmentation_head=segmentation_head,
        num_feature_levels=1,
        o2m_mask_predict=True,
        dot_prod_scoring=dot_prod_scoring,
        use_instance_query=False,
        multimask_output=True,
        inst_interactive_predictor=inst_interactive_predictor,
    )

    if not eval_mode:
        # Imported lazily: the train package is only required for training.
        from sam3.train.matcher import BinaryHungarianMatcherV2

        model_kwargs["matcher"] = BinaryHungarianMatcherV2(
            focal=True,
            cost_class=2.0,
            cost_bbox=5.0,
            cost_giou=2.0,
            alpha=0.25,
            gamma=2,
            stable=False,
        )

    return Sam3Image(**model_kwargs)
328
+
329
+
330
def _create_tracker_maskmem_backbone():
    """Create the SAM3 Tracker memory encoder.

    Returns:
        SimpleMaskEncoder: encodes predicted masks into 64-dim memory
        features via a strided downsampler and a 2-layer ConvNeXt-style fuser.
    """
    # Position encoding for mask memory backbone
    position_encoding = PositionEmbeddingSine(
        num_pos_feats=64,
        normalize=True,
        scale=None,
        temperature=10000,
        precompute_resolution=1008,
    )

    # Mask processing components
    mask_downsampler = SimpleMaskDownSampler(
        kernel_size=3, stride=2, padding=1, interpol_size=[1152, 1152]
    )

    cx_block_layer = CXBlock(
        dim=256,
        kernel_size=7,
        padding=3,
        layer_scale_init_value=1.0e-06,
        use_dwconv=True,
    )

    fuser = SimpleFuser(layer=cx_block_layer, num_layers=2)

    maskmem_backbone = SimpleMaskEncoder(
        out_dim=64,
        position_encoding=position_encoding,
        mask_downsampler=mask_downsampler,
        fuser=fuser,
    )

    return maskmem_backbone
364
+
365
+
366
def _create_tracker_transformer():
    """Create the SAM3 Tracker transformer components.

    Builds a 4-layer memory-attention encoder: each layer self-attends over
    current-frame features and cross-attends into the 64-dim mask memory,
    both using RoPE attention on a 72x72 feature grid.

    Returns:
        TransformerWrapper: encoder-only wrapper (decoder is None).
    """
    # Self attention
    self_attention = RoPEAttention(
        embedding_dim=256,
        num_heads=1,
        downsample_rate=1,
        dropout=0.1,
        rope_theta=10000.0,
        feat_sizes=[72, 72],
        use_fa3=False,
        use_rope_real=False,
    )

    # Cross attention
    cross_attention = RoPEAttention(
        embedding_dim=256,
        num_heads=1,
        downsample_rate=1,
        dropout=0.1,
        kv_in_dim=64,  # keys/values come from the 64-dim mask memory
        rope_theta=10000.0,
        feat_sizes=[72, 72],
        rope_k_repeat=True,
        use_fa3=False,
        use_rope_real=False,
    )

    # Encoder layer
    encoder_layer = TransformerDecoderLayerv2(
        cross_attention_first=False,
        activation="relu",
        dim_feedforward=2048,
        dropout=0.1,
        pos_enc_at_attn=False,
        pre_norm=True,
        self_attention=self_attention,
        d_model=256,
        pos_enc_at_cross_attn_keys=True,
        pos_enc_at_cross_attn_queries=False,
        cross_attention=cross_attention,
    )

    # Encoder
    encoder = TransformerEncoderCrossAttention(
        remove_cross_attention_layers=[],
        batch_first=True,
        d_model=256,
        frozen=False,
        pos_enc_at_input=True,
        layer=encoder_layer,
        num_layers=4,
        use_act_checkpoint=False,
    )

    # Transformer wrapper
    transformer = TransformerWrapper(
        encoder=encoder,
        decoder=None,
        d_model=256,
    )

    return transformer
429
+
430
+
431
def build_tracker(
    apply_temporal_disambiguation: bool, with_backbone: bool = False, compile_mode=None
) -> Sam3TrackerPredictor:
    """
    Build the SAM3 Tracker module for video tracking.

    Args:
        apply_temporal_disambiguation: Enable memory selection for temporal
            disambiguation.
        with_backbone: Attach a standalone vision backbone (no text tower);
            otherwise the tracker relies on externally-computed features.
        compile_mode: Optional torch.compile mode for the vision backbone.

    Returns:
        Sam3TrackerPredictor: Wrapped SAM3 Tracker module
    """

    # Create model components
    maskmem_backbone = _create_tracker_maskmem_backbone()
    transformer = _create_tracker_transformer()
    backbone = None
    if with_backbone:
        vision_backbone = _create_vision_backbone(compile_mode=compile_mode)
        backbone = SAM3VLBackbone(scalp=1, visual=vision_backbone, text=None)
    # Create the Tracker module
    model = Sam3TrackerPredictor(
        image_size=1008,
        num_maskmem=7,
        backbone=backbone,
        backbone_stride=14,
        transformer=transformer,
        maskmem_backbone=maskmem_backbone,
        # SAM parameters
        multimask_output_in_sam=True,
        # Evaluation
        forward_backbone_per_frame_for_eval=True,
        trim_past_non_cond_mem_for_eval=False,
        # Multimask
        multimask_output_for_tracking=True,
        multimask_min_pt_num=0,
        multimask_max_pt_num=1,
        # Additional settings
        always_start_from_first_ann_frame=False,
        # Mask overlap
        non_overlap_masks_for_mem_enc=False,
        non_overlap_masks_for_output=False,
        max_cond_frames_in_attn=4,
        offload_output_to_cpu_for_eval=False,
        # SAM decoder settings
        sam_mask_decoder_extra_args={
            "dynamic_multimask_via_stability": True,
            "dynamic_multimask_stability_delta": 0.05,
            "dynamic_multimask_stability_thresh": 0.98,
        },
        clear_non_cond_mem_around_input=True,
        fill_hole_area=0,
        use_memory_selection=apply_temporal_disambiguation,
    )

    return model
484
+
485
+
486
def _create_text_encoder(bpe_path: str) -> VETextEncoder:
    """Create the SAM3 text tower on top of a BPE tokenizer.

    Args:
        bpe_path: Path to the BPE vocabulary file.
    """
    bpe_tokenizer = SimpleTokenizer(bpe_path=bpe_path)
    encoder_cfg = {
        "d_model": 256,
        "width": 1024,
        "heads": 16,
        "layers": 24,
    }
    return VETextEncoder(tokenizer=bpe_tokenizer, **encoder_cfg)
496
+
497
+
498
def _create_vision_backbone(
    compile_mode=None, enable_inst_interactivity=True
) -> Sam3DualViTDetNeck:
    """Create the SAM3 visual backbone: a ViT trunk wrapped in the dual neck.

    Args:
        compile_mode: Optional torch.compile mode for the ViT trunk.
        enable_inst_interactivity: Attach the SAM2-style neck needed for the
            SAM 1 point/box task.
    """
    # Precompute the position encoding at the working resolution.
    pos_enc = _create_position_encoding(precompute_resolution=1008)
    trunk: ViT = _create_vit_backbone(compile_mode=compile_mode)
    neck: Sam3DualViTDetNeck = _create_vit_neck(
        pos_enc,
        trunk,
        enable_inst_interactivity=enable_inst_interactivity,
    )
    return neck
513
+
514
+
515
def _create_sam3_transformer(has_presence_token: bool = True) -> TransformerWrapper:
    """Create SAM3 transformer encoder and decoder.

    NOTE(review): `has_presence_token` is currently unused — the decoder is
    always built with presence_token=True inside _create_transformer_decoder.
    Confirm whether callers passing False expect it to take effect.
    """
    encoder: TransformerEncoderFusion = _create_transformer_encoder()
    decoder: TransformerDecoder = _create_transformer_decoder()

    return TransformerWrapper(encoder=encoder, decoder=decoder, d_model=256)
521
+
522
+
523
def _load_checkpoint(model, checkpoint_path):
    """Load model checkpoint from file.

    The full SAM3 checkpoint stores detector weights under a "detector."
    prefix and tracker weights under a "tracker." prefix; keys are remapped
    here to the image model's module layout. Loading is non-strict so that
    checkpoints covering more modules than this model still load.

    Args:
        model: SAM3 image model instance to load weights into.
        checkpoint_path: Path readable by g_pathmgr pointing at a torch
            checkpoint (optionally wrapped in a {"model": state_dict} dict).
    """
    with g_pathmgr.open(checkpoint_path, "rb") as f:
        ckpt = torch.load(f, map_location="cpu", weights_only=True)
    if "model" in ckpt and isinstance(ckpt["model"], dict):
        ckpt = ckpt["model"]
    # NOTE(review): the substring tests (`"detector" in k`, `"tracker" in k`)
    # match the word anywhere in the key, not just as a prefix — confirm key
    # names cannot contain these words elsewhere.
    sam3_image_ckpt = {
        k.replace("detector.", ""): v for k, v in ckpt.items() if "detector" in k
    }
    if model.inst_interactive_predictor is not None:
        sam3_image_ckpt.update(
            {
                k.replace("tracker.", "inst_interactive_predictor.model."): v
                for k, v in ckpt.items()
                if "tracker" in k
            }
        )
    missing_keys, _ = model.load_state_dict(sam3_image_ckpt, strict=False)
    if len(missing_keys) > 0:
        print(
            f"loaded {checkpoint_path} and found "
            f"missing and/or unexpected keys:\n{missing_keys=}"
        )
546
+
547
+
548
+ def _setup_device_and_mode(model, device, eval_mode):
549
+ """Setup model device and evaluation mode."""
550
+ if device == "cuda":
551
+ model = model.cuda()
552
+ if eval_mode:
553
+ model.eval()
554
+ return model
555
+
556
+
557
def build_sam3_image_model(
    bpe_path=None,
    device="cuda" if torch.cuda.is_available() else "cpu",
    eval_mode=True,
    checkpoint_path=None,
    load_from_HF=True,
    enable_segmentation=True,
    enable_inst_interactivity=False,
    compile=False,
):
    """
    Build SAM3 image model

    Args:
        bpe_path: Path to the BPE tokenizer vocabulary; defaults to the
            bundled vocabulary in the package assets
        device: Device to load the model on ('cuda' or 'cpu')
        eval_mode: Whether to set the model to evaluation mode
        checkpoint_path: Optional path to model checkpoint
        load_from_HF: Download the checkpoint from the Hugging Face Hub when
            no explicit checkpoint_path is given
        enable_segmentation: Whether to enable segmentation head
        enable_inst_interactivity: Whether to enable instance interactivity (SAM 1 task)
        compile: Set True to enable torch.compile ("default" mode)

    Returns:
        A SAM3 image model
    """
    if bpe_path is None:
        bpe_path = os.path.join(
            os.path.dirname(__file__), "..", "assets", "bpe_simple_vocab_16e6.txt.gz"
        )
    # Create visual components
    compile_mode = "default" if compile else None
    vision_encoder = _create_vision_backbone(
        compile_mode=compile_mode, enable_inst_interactivity=enable_inst_interactivity
    )

    # Create text components
    text_encoder = _create_text_encoder(bpe_path)

    # Create visual-language backbone
    backbone = _create_vl_backbone(vision_encoder, text_encoder)

    # Create transformer components
    transformer = _create_sam3_transformer()

    # Create dot product scoring
    dot_prod_scoring = _create_dot_product_scoring()

    # Create segmentation head if enabled
    segmentation_head = (
        _create_segmentation_head(compile_mode=compile_mode)
        if enable_segmentation
        else None
    )

    # Create geometry encoder
    input_geometry_encoder = _create_geometry_encoder()
    if enable_inst_interactivity:
        # Single-image usage: reuse the tracker without temporal heuristics.
        sam3_pvs_base = build_tracker(apply_temporal_disambiguation=False)
        inst_predictor = SAM3InteractiveImagePredictor(sam3_pvs_base)
    else:
        inst_predictor = None
    # Create the SAM3 model
    model = _create_sam3_model(
        backbone,
        transformer,
        input_geometry_encoder,
        segmentation_head,
        dot_prod_scoring,
        inst_predictor,
        eval_mode,
    )
    if load_from_HF and checkpoint_path is None:
        checkpoint_path = download_ckpt_from_hf()
    # Load checkpoint if provided
    if checkpoint_path is not None:
        _load_checkpoint(model, checkpoint_path)

    # Setup device and mode
    model = _setup_device_and_mode(model, device, eval_mode)

    return model
638
+
639
+
640
def download_ckpt_from_hf():
    """Download the SAM3 checkpoint (and config) from the Hugging Face Hub.

    Returns:
        str: local filesystem path of the cached ``sam3.pt`` checkpoint.
    """
    repo_id = "facebook/sam3"
    # Fetch the config first so both files land in the local HF cache.
    hf_hub_download(repo_id=repo_id, filename="config.json")
    checkpoint_path = hf_hub_download(repo_id=repo_id, filename="sam3.pt")
    return checkpoint_path
647
+
648
+
649
def build_sam3_video_model(
    checkpoint_path: Optional[str] = None,
    load_from_HF=True,
    bpe_path: Optional[str] = None,
    has_presence_token: bool = True,
    geo_encoder_use_img_cross_attn: bool = True,
    strict_state_dict_loading: bool = True,
    apply_temporal_disambiguation: bool = True,
    device="cuda" if torch.cuda.is_available() else "cpu",
    compile=False,
) -> Sam3VideoInferenceWithInstanceInteractivity:
    """
    Build SAM3 dense tracking model.

    Args:
        checkpoint_path: Optional path to checkpoint file
        load_from_HF: Download the checkpoint from the Hugging Face Hub when
            no explicit checkpoint_path is given
        bpe_path: Path to the BPE tokenizer file; defaults to the bundled
            vocabulary in the package assets
        has_presence_token: Supervise joint box scores in the detector
        geo_encoder_use_img_cross_attn: NOTE(review) currently unused —
            confirm whether it should be forwarded to the geometry encoder
        strict_state_dict_loading: Require an exact state-dict match
        apply_temporal_disambiguation: Enable the temporal heuristics
            (hotstart suppression, periodic reconditioning); False gives the
            ablation configuration with all heuristics disabled
        device: Device to load the model on ('cuda' or 'cpu')
        compile: Set True to enable model compilation

    Returns:
        Sam3VideoInferenceWithInstanceInteractivity: The instantiated dense tracking model
    """
    if bpe_path is None:
        bpe_path = os.path.join(
            os.path.dirname(__file__), "..", "assets", "bpe_simple_vocab_16e6.txt.gz"
        )

    # Build Tracker module
    tracker = build_tracker(apply_temporal_disambiguation=apply_temporal_disambiguation)

    # Build Detector components
    visual_neck = _create_vision_backbone()
    text_encoder = _create_text_encoder(bpe_path)
    backbone = SAM3VLBackbone(scalp=1, visual=visual_neck, text=text_encoder)
    transformer = _create_sam3_transformer(has_presence_token=has_presence_token)
    segmentation_head: UniversalSegmentationHead = _create_segmentation_head()
    input_geometry_encoder = _create_geometry_encoder()

    # Create main dot product scoring
    main_dot_prod_mlp = MLP(
        input_dim=256,
        hidden_dim=2048,
        output_dim=256,
        num_layers=2,
        dropout=0.1,
        residual=True,
        out_norm=nn.LayerNorm(256),
    )
    main_dot_prod_scoring = DotProductScoring(
        d_model=256, d_proj=256, prompt_mlp=main_dot_prod_mlp
    )

    # Build Detector module
    detector = Sam3ImageOnVideoMultiGPU(
        num_feature_levels=1,
        backbone=backbone,
        transformer=transformer,
        segmentation_head=segmentation_head,
        semantic_segmentation_head=None,
        input_geometry_encoder=input_geometry_encoder,
        use_early_fusion=True,
        use_dot_prod_scoring=True,
        dot_prod_scoring=main_dot_prod_scoring,
        supervise_joint_box_scores=has_presence_token,
    )

    # The two configurations only differ in the temporal-disambiguation
    # heuristics (hotstart thresholds and reconditioning period); everything
    # else is shared, so build one kwargs dict and override per branch.
    video_model_kwargs = dict(
        detector=detector,
        tracker=tracker,
        score_threshold_detection=0.5,
        assoc_iou_thresh=0.1,
        det_nms_thresh=0.1,
        new_det_thresh=0.7,
        suppress_unmatched_only_within_hotstart=True,
        min_trk_keep_alive=-1,
        max_trk_keep_alive=30,
        init_trk_keep_alive=30,
        suppress_overlapping_based_on_recent_occlusion_threshold=0.7,
        suppress_det_close_to_boundary=False,
        fill_hole_area=16,
        masklet_confirmation_enable=False,
        decrease_trk_keep_alive_for_empty_masklets=False,
        image_size=1008,
        image_mean=(0.5, 0.5, 0.5),
        image_std=(0.5, 0.5, 0.5),
        compile_model=compile,
    )
    if apply_temporal_disambiguation:
        video_model_kwargs.update(
            hotstart_delay=15,
            hotstart_unmatch_thresh=8,
            hotstart_dup_thresh=8,
            recondition_every_nth_frame=16,
        )
    else:
        # A version without any heuristics for ablation studies.
        video_model_kwargs.update(
            hotstart_delay=0,
            hotstart_unmatch_thresh=0,
            hotstart_dup_thresh=0,
            recondition_every_nth_frame=0,
        )
    model = Sam3VideoInferenceWithInstanceInteractivity(**video_model_kwargs)

    # Load checkpoint if provided
    if load_from_HF and checkpoint_path is None:
        checkpoint_path = download_ckpt_from_hf()
    if checkpoint_path is not None:
        with g_pathmgr.open(checkpoint_path, "rb") as f:
            ckpt = torch.load(f, map_location="cpu", weights_only=True)
        if "model" in ckpt and isinstance(ckpt["model"], dict):
            ckpt = ckpt["model"]

        missing_keys, unexpected_keys = model.load_state_dict(
            ckpt, strict=strict_state_dict_loading
        )
        if missing_keys:
            print(f"Missing keys: {missing_keys}")
        if unexpected_keys:
            print(f"Unexpected keys: {unexpected_keys}")

    model.to(device=device)
    return model
788
+
789
+
790
def build_sam3_video_predictor(*model_args, gpus_to_use=None, **model_kwargs):
    """Construct a multi-GPU SAM3 video predictor.

    All positional and keyword arguments are forwarded to
    Sam3VideoPredictorMultiGPU; *gpus_to_use* selects the GPU set.
    """
    predictor = Sam3VideoPredictorMultiGPU(
        *model_args,
        gpus_to_use=gpus_to_use,
        **model_kwargs,
    )
    return predictor
source_code/sam3/sam3/perflib/__init__.py ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved

import os

# perflib is enabled by default; set USE_PERFLIB to anything other than "1"
# in the environment to turn it off.
is_enabled = os.getenv("USE_PERFLIB", "1") == "1"
source_code/sam3/sam3/perflib/compile.py ADDED
@@ -0,0 +1,99 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
2
+
3
+ import torch
4
+
5
+
6
def recursive_fn_factory(fn):
    """Build a function that applies `fn` to every tensor nested in containers.

    Dicts, lists and tuples are rebuilt with the same structure; `None`, bools
    and ints pass through unchanged. Any other type raises `TypeError` — an
    explicit whitelist is used because silently skipping an unexpected type
    would hide bugs where `fn` should have been applied.
    """

    def apply(node):
        if isinstance(node, dict):
            return {key: apply(val) for key, val in node.items()}
        if isinstance(node, list):
            return [apply(item) for item in node]
        if isinstance(node, tuple):
            return tuple(apply(item) for item in node)
        if isinstance(node, torch.Tensor):
            return fn(node)
        if node is None or isinstance(node, (bool, int)):
            return node
        raise TypeError(f"Unexpected type {type(node)}")

    return apply
29
+
30
+
31
# Ready-made recursive appliers for the two most common tensor hygiene ops.
recursive_contiguous = recursive_fn_factory(torch.Tensor.contiguous)
recursive_clone = recursive_fn_factory(torch.clone)
33
+
34
+
35
def compile_wrapper(
    fn, *, mode="max-autotune", fullgraph=True, dynamic=False, name=None
):
    """Compile `fn` with `torch.compile` and wrap calls with profiling + copy hygiene.

    Every call is annotated with a profiler `record_function` range. Inputs are
    made contiguous before the compiled call and outputs are cloned afterwards,
    so buffers owned by the compiled graph are never aliased by callers.

    Args:
        fn: callable to compile.
        mode / fullgraph / dynamic: forwarded to `torch.compile`.
        name: optional profiler label; defaults to "compiled {fn}".

    Returns:
        A wrapper callable with the same signature as `fn`.
    """
    compiled = torch.compile(fn, mode=mode, fullgraph=fullgraph, dynamic=dynamic)
    # The label is fixed for the lifetime of the wrapper, so compute it once.
    profile_label = f"compiled {fn}" if name is None else name

    def _invoke(*args, **kwargs):
        with torch.autograd.profiler.record_function(profile_label):
            args_c = recursive_contiguous(args)
            kwargs_c = recursive_contiguous(kwargs)
            out = compiled(*args_c, **kwargs_c)
            return recursive_clone(out)

    return _invoke
51
+
52
+
53
def shape_logging_wrapper(fn, keep_kwargs, enable_logging=False):
    """
    Wraps a function and prints the shapes of all tensor inputs.
    Only prints when a new combination of shapes is seen.
    Thread-safe.

    Args:
        fn: Function to wrap
        keep_kwargs: collection of kwarg names whose shapes should be tracked
        enable_logging: Boolean flag to enable/disable logging
    """
    seen_shapes = set()

    def get_shape(obj):
        # Summarize an argument as a hashable "shape signature".
        if isinstance(obj, torch.Tensor):
            return obj.shape
        elif isinstance(obj, (list, tuple)):
            # Empty sequences have no element to recurse into; fall back to
            # the type name. (Previously `obj[0]` raised IndexError here.)
            if len(obj) == 0:
                return type(obj).__name__
            if len(obj) > 1:
                return tuple(get_shape(x) for x in obj)
            # Single-element sequences collapse to the element's shape.
            return get_shape(obj[0])
        elif isinstance(obj, dict):
            return tuple(sorted((k, get_shape(v)) for k, v in obj.items()))
        else:
            return type(obj).__name__

    def wrapper(*args, **kwargs):
        shapes = tuple(get_shape(arg) for arg in args) + tuple(
            (k, get_shape(v))
            for k, v in kwargs.items()
            if isinstance(v, (torch.Tensor, list))
            and (len(keep_kwargs) > 0 and k in keep_kwargs)
        )
        if shapes not in seen_shapes:
            seen_shapes.add(shapes)
            if enable_logging:
                print(f"[ShapeLogger] New input shapes for {fn.__qualname__}: {shapes}")
        return fn(*args, **kwargs)

    # Allow toggling the flag at runtime
    wrapper.enable_logging = enable_logging

    def set_logging(enabled=False):
        nonlocal enable_logging
        enable_logging = enabled
        wrapper.enable_logging = enable_logging

    wrapper.set_logging = set_logging
    return wrapper
source_code/sam3/sam3/perflib/fa3.py ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
2
+
3
+ import torch
4
+
5
+
6
# Register FA3 (FlashAttention-3) as a PyTorch custom op so torch.compile
# can trace calls to it without graph breaks.
@torch.library.custom_op("flash::flash_attn_func", mutates_args=())
def flash_attn_func_op(
    q: torch.Tensor, k: torch.Tensor, v: torch.Tensor
) -> torch.Tensor:
    # Imported lazily so this module loads even when flash_attn_interface
    # is not installed; the op then fails only when actually called.
    from flash_attn_interface import flash_attn_func as fa3

    # NOTE(review): some FA3 versions return (output, softmax_lse), while the
    # declared return type here is a single Tensor — confirm the installed
    # flash_attn_interface version returns just the attention output.
    return fa3(q, k, v)
13
+
14
+
15
def flash_attn_func(q, k, v):
    """Run the FA3 custom op in float8_e4m3fn, returning the result in q's dtype."""
    fp8 = torch.float8_e4m3fn
    out = flash_attn_func_op(q.to(fp8), k.to(fp8), v.to(fp8))
    return out.to(q.dtype)
18
+
19
+
20
# Fake (meta) implementation: tells the compiler the output metadata of the
# custom op without running FA3, so shapes/dtypes can be inferred at trace time.
@flash_attn_func_op.register_fake
def _(q, k, v, **kwargs):
    # two outputs:
    # 1. output: (batch, seq_len, num_heads, head_dim)
    # 2. softmax_lse: (batch, num_heads, seq_len) with dtype=torch.float32
    # output needs to be bfloat16, not float8!
    # NOTE(review): only the first output is modeled here, matching the single
    # Tensor return of `flash_attn_func_op` — confirm against the FA3 version used.
    meta_q = torch.empty_like(q, dtype=torch.bfloat16).contiguous()
    return meta_q
source_code/sam3/sam3/perflib/masks_ops.py ADDED
@@ -0,0 +1,69 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
2
+
3
+ import torch
4
+
5
+
6
def masks_to_boxes(masks: torch.Tensor, obj_ids: list[int]):
    """Compute xyxy bounding boxes for a stack of masks (torchvision-style).

    Args:
        masks: (N, H, W) tensor; nonzero pixels are foreground.
        obj_ids: per-mask object ids; only used to sanity-check the call site
            (must have the same length as `masks`).

    Returns:
        (N, 4) float tensor of [x_min, y_min, x_max, y_max]. A mask with no
        foreground pixels yields [W, H, 0, 0], as in the original formulation.
    """
    with torch.autograd.profiler.record_function("perflib: masks_to_boxes"):
        # Sanity check based on callsite for replacement
        assert masks.shape[0] == len(obj_ids)
        assert masks.dim() == 3

        if masks.numel() == 0:
            return torch.zeros((0, 4), device=masks.device, dtype=torch.float)

        num, height, width = masks.shape
        dev = masks.device
        row_idx = torch.arange(height, device=dev).view(1, height)
        col_idx = torch.arange(width, device=dev).view(1, width)

        fg = masks != 0  # (N, H, W)
        cols_hit = fg.amax(dim=1)  # (N, W): which columns contain foreground
        rows_hit = fg.amax(dim=2)  # (N, H): which rows contain foreground

        # For mins, empty positions contribute the out-of-range sentinel W/H;
        # for maxes they contribute 0.
        x_min = torch.amin(torch.where(cols_hit, col_idx, width), dim=1)
        y_min = torch.amin(torch.where(rows_hit, row_idx, height), dim=1)
        x_max = torch.amax(torch.where(cols_hit, col_idx, 0), dim=1)
        y_max = torch.amax(torch.where(rows_hit, row_idx, 0), dim=1)

        boxes = torch.stack([x_min, y_min, x_max, y_max], dim=1).to(dtype=torch.float)
        assert boxes.shape == (num, 4)
        assert boxes.device == masks.device
        assert boxes.dtype == torch.float
        return boxes
46
+
47
+
48
def mask_iou(pred_masks: torch.Tensor, gt_masks: torch.Tensor) -> torch.Tensor:
    """
    Pairwise IoU between two stacks of binary masks.

    Args:
        pred_masks: (N, H, W) bool Tensor of predicted masks
        gt_masks: (M, H, W) bool Tensor of ground-truth masks
    Returns:
        (N, M) float Tensor with the IoU of every (pred, gt) pair
    """
    assert pred_masks.dtype == gt_masks.dtype == torch.bool
    num_pred = pred_masks.shape[0]
    num_gt = gt_masks.shape[0]

    # Broadcast to (N, M, H*W) pairs via flattened views
    preds = pred_masks.reshape(num_pred, 1, -1)
    gts = gt_masks.reshape(1, num_gt, -1)

    inter = torch.logical_and(preds, gts).sum(dim=2).float()
    union = torch.logical_or(preds, gts).sum(dim=2).float()
    # clamp avoids 0/0 when both masks are empty (IoU becomes 0)
    return inter / union.clamp(min=1)
source_code/sam3/sam3/perflib/nms.py ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
2
+
3
+ import logging
4
+
5
+ import numpy as np
6
+ import torch
7
+
8
+ from sam3.perflib.masks_ops import mask_iou
9
+
10
+
11
# Prefer the compiled CUDA extension for generic NMS when it is installed;
# otherwise `generic_nms` below falls back to the Triton or CPU implementation.
try:
    from torch_generic_nms import generic_nms as generic_nms_cuda

    GENERIC_NMS_AVAILABLE = True
except ImportError:
    logging.debug(
        "Falling back to triton or CPU mask NMS implementation -- please install `torch_generic_nms` via\n\t"
        'pip uninstall -y torch_generic_nms; TORCH_CUDA_ARCH_LIST="8.0 9.0" pip install git+https://github.com/ronghanghu/torch_generic_nms'
    )
    GENERIC_NMS_AVAILABLE = False
21
+
22
+
23
def nms_masks(
    pred_probs: torch.Tensor,
    pred_masks: torch.Tensor,
    prob_threshold: float,
    iou_threshold: float,
) -> torch.Tensor:
    """
    Score-threshold detections, then run mask NMS on the survivors.

    Args:
        pred_probs: (num_det,) float Tensor of detection scores
        pred_masks: (num_det, H_mask, W_mask) float Tensor of mask logits
        prob_threshold: detections at or below this score are dropped up front
        iou_threshold: mask-IoU threshold used by NMS

    Returns:
        (num_det,) bool Tensor marking detections kept after thresholding + NMS
    """
    # Prefilter by score; NMS only runs on detections above the threshold.
    above_thresh = pred_probs > prob_threshold  # (num_det,)
    kept_probs = pred_probs[above_thresh]  # (num_valid,)
    kept_masks_bin = pred_masks[above_thresh] > 0  # (num_valid, H_mask, W_mask)
    if kept_probs.numel() == 0:
        # Nothing survived the score filter; the all-False mask is the answer.
        return above_thresh

    pairwise_iou = mask_iou(kept_masks_bin, kept_masks_bin)  # (num_valid, num_valid)
    surviving = generic_nms(pairwise_iou, kept_probs, iou_threshold)

    # Map each original detection to its index among the prefiltered ones
    # (-1 for detections that failed the score filter).
    compact_idx = torch.where(above_thresh, above_thresh.cumsum(dim=0) - 1, -1)
    return torch.isin(compact_idx, surviving)  # (num_det,)
53
+
54
+
55
def generic_nms(
    ious: torch.Tensor, scores: torch.Tensor, iou_threshold=0.5
) -> torch.Tensor:
    """A generic version of `torchvision.ops.nms` that takes a pairwise IoU matrix.

    Dispatches to the CUDA extension when available, else the Triton kernel on
    GPU, else the CPU fallback.
    """
    assert ious.dim() == 2 and ious.size(0) == ious.size(1)
    assert scores.dim() == 1 and scores.size(0) == ious.size(0)

    if not ious.is_cuda:
        return generic_nms_cpu(ious, scores, iou_threshold)

    if GENERIC_NMS_AVAILABLE:
        return generic_nms_cuda(ious, scores, iou_threshold, use_iou_matrix=True)

    # Imported lazily: triton is only needed on this fallback path.
    from sam3.perflib.triton.nms import nms_triton

    return nms_triton(ious, scores, iou_threshold)
72
+
73
+
74
def generic_nms_cpu(
    ious: torch.Tensor, scores: torch.Tensor, iou_threshold=0.5
) -> torch.Tensor:
    """
    CPU implementation of generic NMS over a precomputed pairwise IoU matrix
    (based on https://github.com/jwyang/faster-rcnn.pytorch/blob/master/lib/model/nms/nms_cpu.py).

    Returns the int64 indices of kept detections, highest score first.
    """
    iou_mat = ious.float().detach().cpu().numpy()
    score_vec = scores.float().detach().cpu().numpy()

    # Candidates in descending-score order; greedily keep the best and drop
    # everything overlapping it above the threshold.
    remaining = score_vec.argsort()[::-1]
    keep = []
    while remaining.size > 0:
        best = remaining.item(0)
        keep.append(best)
        survivors = iou_mat[best, remaining[1:]] <= iou_threshold
        remaining = remaining[1:][survivors]

    return torch.tensor(keep, dtype=torch.int64, device=scores.device)
source_code/sam3/sam3/perflib/tests/tests.py ADDED
@@ -0,0 +1,59 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
2
+
3
+ import os
4
+
5
+ import numpy as np
6
+ import pytest
7
+ import torch
8
+ from PIL import Image
9
+ from sam3.perflib.masks_ops import masks_to_boxes
10
+
11
+
12
class TestMasksToBoxes:
    """Regression test: `masks_to_boxes` must reproduce known boxes for the
    multi-frame mask fixture in assets/masks.tiff across float dtypes."""

    def test_masks_box(self):
        def _load_mask_image():
            # The fixture lives next to this test file.
            asset_dir = os.path.join(
                os.path.dirname(os.path.abspath(__file__)), "assets"
            )
            return Image.open(os.path.join(asset_dir, "masks.tiff"))

        def _frames_to_tensor(image, dest):
            # Copy each TIFF frame into one slice of the preallocated tensor.
            for frame_idx in range(image.n_frames):
                image.seek(frame_idx)
                dest[frame_idx] = torch.tensor(np.array(image))
            return dest

        def _check(masks, expected, atol=1e-4):
            out = masks_to_boxes(masks, [1 for _ in range(masks.shape[0])])
            assert out.dtype == torch.float
            print("out: ", out)
            print("expected: ", expected)
            torch.testing.assert_close(
                out, expected, rtol=0.0, check_dtype=True, atol=atol
            )

        expected = torch.tensor(
            [
                [127, 2, 165, 40],
                [2, 50, 44, 92],
                [56, 63, 98, 100],
                [139, 68, 175, 104],
                [160, 112, 198, 145],
                [49, 138, 99, 182],
                [108, 148, 152, 213],
            ],
            dtype=torch.float,
        )

        image = _load_mask_image()
        for dtype in [torch.float16, torch.float32, torch.float64]:
            blank = torch.zeros(
                (image.n_frames, image.height, image.width), dtype=dtype
            )
            _check(_frames_to_tensor(image, blank), expected)
source_code/sam3/sam3/perflib/triton/connected_components.py ADDED
@@ -0,0 +1,468 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
2
+ import math
3
+
4
+ import torch
5
+ import triton
6
+ import triton.language as tl
7
+
8
+
9
# Combine function for tl.reduce: logical OR of two lanes.
@triton.jit
def _any_combine(a, b):
    return a | b
12
+
13
+
14
# Reduction "any": True iff any element along `dim` is truthy.
@triton.jit
def tl_any(a, dim=0):
    return tl.reduce(a, dim, _any_combine)
17
+
18
+
19
+ # ==============================================================================
20
+ # ## Phase 1: Initialization Kernel
21
+ # ==============================================================================
22
+ # Each foreground pixel (value > 0) gets a unique label equal to its
23
+ # linear index. Background pixels (value == 0) get a sentinel label of -1.
24
+ # Note that the indexing is done across batch boundaries for simplicity
25
+ # (i.e., the first pixel of image 1 gets label H*W, etc.)
26
+
27
+
28
@triton.jit
def _init_labels_kernel(
    input_ptr, labels_ptr, numel: tl.constexpr, BLOCK_SIZE: tl.constexpr
):
    # Phase 1: every foreground pixel (value != 0) is labeled with its global
    # linear index; background pixels get the -1 sentinel.
    pid = tl.program_id(0)
    offsets = pid * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
    mask = offsets < numel
    input_values = tl.load(input_ptr + offsets, mask=mask, other=0)

    indices = tl.where((input_values != 0), offsets, -1)
    tl.store(labels_ptr + offsets, indices, mask=mask)
39
+
40
+
41
+ # ==============================================================================
42
+ # ## Phase 2: Local merging
43
+ # ==============================================================================
44
+ # Each pixel tries to merge with its 8-connected neighbors (up, down, left, right)
45
+ # if they have the same value. This is done using a disjoint-set union operation.
46
+
47
+
48
@triton.jit
def find(labels_ptr, indices, mask):
    # Disjoint-set "find": for each active lane (mask=True), follow parent
    # pointers starting at `indices` until reaching a root (parent == self)
    # or the -1 background sentinel; returns the final id per lane.
    current_pids = indices

    # 'is_done' tracks lanes that have finished their work.
    # A lane is initially "done" if it's not active (mask is False).
    is_done = ~mask

    # Loop as long as there is at least one lane that is NOT done.
    while tl_any(~is_done):
        # The work_mask is for lanes that are still active and seeking their root.
        work_mask = ~is_done
        parents = tl.load(labels_ptr + current_pids, mask=work_mask, other=-1)
        # A lane is now done if its parent is itself (it's a root)
        # or if it hits a -1 sentinel (a safe exit condition).
        is_root = parents == current_pids
        is_sentinel = parents == -1
        is_done |= is_root | is_sentinel

        # For lanes that are not yet done, update their pid to their parent to continue traversal.
        current_pids = tl.where(is_done, current_pids, parents)
        # We could add the following line to do path compression, but experimentally it's slower
        # tl.atomic_min(labels_ptr + indices, current_pids, mask=mask)
    return current_pids
72
+
73
+
74
@triton.jit
def union(labels_ptr, a, b, process_mask):
    # This function implements a disjoint-set union
    # As an invariant, we use the fact that the roots have the lower id. That helps parallelization
    # However, that is not sufficient by itself. Suppose two threads want to do union(0,2) and union(1,2) at the same time
    # Then if we do a naive atomic_min, 0 and 1 will compete to be the new parent of 2 and min(0, 1) will win.
    # However, 1 still needs to be merged with the new {0, 2} component.
    # To ensure that merge is also done, we need to detect whether the merge was successful, and if not retry until it is
    # Returns, per lane, the root id of the merged component (for inactive
    # lanes where process_mask is False, `a` is returned unchanged).

    current_a = a
    current_b = b

    final_root = a
    # A mask to track which lanes have successfully completed their union.
    done_mask = ~process_mask  # tl.zeros_like(a) == 1 # Init with all False

    while tl_any(~done_mask):
        # Define the mask for lanes that still need work in this iteration
        work_mask = process_mask & ~done_mask

        # Find the roots for the current a and b values in the active lanes
        root_a = find(labels_ptr, current_a, work_mask)
        tl.debug_barrier()
        root_b = find(labels_ptr, current_b, work_mask)

        # 7. Merge logic
        # If roots are already the same, the sets are already merged. Mark as done.
        are_equal = root_a == root_b
        final_root = tl.where(are_equal & work_mask & ~done_mask, root_a, final_root)
        done_mask |= are_equal & work_mask

        # Define masks for the two merge cases (a < b or b < a)
        a_is_smaller = root_a < root_b

        # Case 1: root_a < root_b. Attempt to set parent[root_b] = root_a
        merge_mask_a_smaller = work_mask & a_is_smaller & ~are_equal
        ptr_b = labels_ptr + root_b
        old_val_b = tl.atomic_min(ptr_b, root_a, mask=merge_mask_a_smaller)

        # A lane is done if its atomic op was successful (old value was what we expected)
        success_b = old_val_b == root_b
        final_root = tl.where(success_b & work_mask & ~done_mask, root_a, final_root)
        done_mask |= success_b & merge_mask_a_smaller

        # *** Crucial Retry Logic ***
        # If the update failed (old_val_b != root_b), another thread interfered.
        # We update `current_b` to this new root (`old_val_b`) and will retry in the next loop iteration.
        current_b = tl.where(success_b | ~merge_mask_a_smaller, current_b, old_val_b)

        # Case 2: root_b < root_a. Attempt to set parent[root_a] = root_b
        merge_mask_b_smaller = work_mask & ~a_is_smaller & ~are_equal
        ptr_a = labels_ptr + root_a
        old_val_a = tl.atomic_min(ptr_a, root_b, mask=merge_mask_b_smaller)

        success_a = old_val_a == root_a
        final_root = tl.where(success_a & work_mask & ~done_mask, root_b, final_root)
        done_mask |= success_a & merge_mask_b_smaller

        # *** Crucial Retry Logic ***
        # Similarly, update `current_a` if the atomic operation failed.
        current_a = tl.where(success_a | ~merge_mask_b_smaller, current_a, old_val_a)

    return final_root
137
+
138
+
139
@triton.jit
def _merge_helper(
    input_ptr,
    labels_ptr,
    base_offset,
    offsets_h,
    offsets_w,
    mask_2d,
    valid_current,
    current_values,
    current_labels,
    H,
    W,
    dx: tl.constexpr,
    dy: tl.constexpr,
):
    # Helper functions to compute merge with a specific neighbor offset (dx, dy)
    # dx shifts along width, dy along height. Each in-bounds pixel whose
    # neighbor has the same input value is unioned with that neighbor.

    neighbor_h = offsets_h + dy
    neighbor_w = offsets_w + dx
    # Proper bounds checking: all four bounds must be satisfied
    mask_n = (
        mask_2d
        & (neighbor_h[:, None] >= 0)
        & (neighbor_h[:, None] < H)
        & (neighbor_w[None, :] >= 0)
        & (neighbor_w[None, :] < W)
    )

    offsets_neighbor = neighbor_h[:, None] * W + neighbor_w[None, :]
    neighbor_values = tl.load(
        input_ptr + base_offset + offsets_neighbor, mask=mask_n, other=-1
    )

    mask_n = tl.ravel(mask_n)
    neighbor_labels = tl.load(
        labels_ptr + tl.ravel(base_offset + offsets_neighbor), mask=mask_n, other=-1
    )

    to_merge = (
        mask_n & (neighbor_labels != -1) & tl.ravel(current_values == neighbor_values)
    )
    valid_write = valid_current & to_merge

    # returns new parents for the pixels that were merged (otherwise keeps current labels)
    return tl.where(
        valid_write,
        union(labels_ptr, current_labels, neighbor_labels, valid_write),
        current_labels,
    )
189
+
190
+
191
@triton.autotune(
    configs=[
        triton.Config(
            {"BLOCK_SIZE_H": 4, "BLOCK_SIZE_W": 16}, num_stages=1, num_warps=2
        ),
        triton.Config(
            {"BLOCK_SIZE_H": 4, "BLOCK_SIZE_W": 32}, num_stages=2, num_warps=4
        ),
    ],
    key=["H", "W"],
    restore_value=["labels_ptr"],
)
@triton.jit
def _local_prop_kernel(
    labels_ptr,
    input_ptr,
    H: tl.constexpr,
    W: tl.constexpr,
    BLOCK_SIZE_H: tl.constexpr,
    BLOCK_SIZE_W: tl.constexpr,
):
    # This is the meat of the Phase 2 to do local merging
    # It will be launched with a 2D grid:
    # - dim 0: batch index
    # - dim 1: block index over HxW image (2D tiling)
    # Only 4 of the 8 neighbor directions are probed (left, up, up-left,
    # down-left); the opposite directions are covered symmetrically by the
    # neighboring pixels' own merges.
    pid_b = tl.program_id(0)
    pid_hw = tl.program_id(1)

    # Calculate offsets for the core block
    offsets_h = (pid_hw // tl.cdiv(W, BLOCK_SIZE_W)) * BLOCK_SIZE_H + tl.arange(
        0, BLOCK_SIZE_H
    )
    offsets_w = (pid_hw % tl.cdiv(W, BLOCK_SIZE_W)) * BLOCK_SIZE_W + tl.arange(
        0, BLOCK_SIZE_W
    )

    base_offset = pid_b * H * W
    offsets_2d = offsets_h[:, None] * W + offsets_w[None, :]
    mask_2d = (offsets_h[:, None] < H) & (offsets_w[None, :] < W)
    mask_1d = tl.ravel(mask_2d)

    # Load the current labels for the block - these are parent pointers
    current_labels = tl.load(
        labels_ptr + tl.ravel(base_offset + offsets_2d), mask=mask_1d, other=-1
    )
    current_values = tl.load(
        input_ptr + base_offset + offsets_2d, mask=mask_2d, other=-1
    )
    valid_current = mask_1d & (current_labels != -1)

    # Horizontal merge
    current_labels = _merge_helper(
        input_ptr,
        labels_ptr,
        base_offset,
        offsets_h,
        offsets_w,
        mask_2d,
        valid_current,
        current_values,
        current_labels,
        H,
        W,
        -1,
        0,
    )
    # Vertical merge
    current_labels = _merge_helper(
        input_ptr,
        labels_ptr,
        base_offset,
        offsets_h,
        offsets_w,
        mask_2d,
        valid_current,
        current_values,
        current_labels,
        H,
        W,
        0,
        -1,
    )
    # Diagonal merges
    current_labels = _merge_helper(
        input_ptr,
        labels_ptr,
        base_offset,
        offsets_h,
        offsets_w,
        mask_2d,
        valid_current,
        current_values,
        current_labels,
        H,
        W,
        -1,
        -1,
    )
    current_labels = _merge_helper(
        input_ptr,
        labels_ptr,
        base_offset,
        offsets_h,
        offsets_w,
        mask_2d,
        valid_current,
        current_values,
        current_labels,
        H,
        W,
        -1,
        1,
    )

    # This actually does some path compression, in a lightweight but beneficial way
    tl.atomic_min(
        labels_ptr + tl.ravel(base_offset + offsets_2d), current_labels, mask=mask_1d
    )
309
+
310
+
311
+ # ==============================================================================
312
+ # ## Phase 3: Pointer Jumping Kernel
313
+ # ==============================================================================
314
+ # This kernel performs pointer jumping to ensure that all pixels point directly to their root labels.
315
+ # This is done in a loop until convergence.
316
+
317
+
318
@triton.jit
def _pointer_jump_kernel(
    labels_in_ptr, labels_out_ptr, numel: tl.constexpr, BLOCK_SIZE: tl.constexpr
):
    """
    Pointer jumping kernel with double buffering to avoid race conditions.
    Reads from labels_in_ptr and writes to labels_out_ptr.
    """
    # This kernel is launched with a 1D grid, and does not care about batching explicitly.
    # By construction, the labels are global indices across the batch, and we never perform
    # cross-batch merges, so this is safe.

    pid = tl.program_id(0)
    offsets = pid * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
    mask = offsets < numel

    # Load current labels from input buffer
    current_labels = tl.load(labels_in_ptr + offsets, mask=mask, other=-1)
    valid_mask = mask & (current_labels != -1)

    # A mask to track which lanes have successfully completed their union.
    done_mask = ~valid_mask
    # Loop condition is equivalent to "any lane that is valid and not yet done".
    while tl_any(~(done_mask | ~valid_mask)):
        parent_labels = tl.load(
            labels_in_ptr + current_labels, mask=valid_mask, other=-1
        )

        are_equal = current_labels == parent_labels
        done_mask |= are_equal & valid_mask

        current_labels = tl.where(
            ~done_mask, tl.minimum(current_labels, parent_labels), current_labels
        )

    # Write to output buffer (safe because we're not reading from it)
    tl.store(labels_out_ptr + offsets, current_labels, mask=mask)
354
+
355
+
356
+ # ==============================================================================
357
+ # ## Phase 4: Kernels for Computing Component Sizes
358
+ # ==============================================================================
359
+
360
+
361
+ # Step 4.1: Count occurrences of each root label using atomic adds.
362
@triton.jit
def _count_labels_kernel(labels_ptr, sizes_ptr, numel, BLOCK_SIZE: tl.constexpr):
    # Phase 4.1: histogram of root labels = per-component pixel counts.
    pid = tl.program_id(0)
    offsets = pid * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
    mask = offsets < numel

    # Load the final, converged labels
    labels = tl.load(labels_ptr + offsets, mask=mask, other=-1)
    valid_mask = mask & (labels != -1)

    # Atomically increment the counter for each label. This builds a histogram.
    tl.atomic_add(sizes_ptr + labels, 1, mask=valid_mask)
374
+
375
+
376
+ # Step 4.2: Broadcast the computed sizes back to the output tensor.
377
@triton.jit
def _broadcast_sizes_kernel(
    labels_ptr, sizes_ptr, out_ptr, numel, BLOCK_SIZE: tl.constexpr
):
    # Phase 4.2: scatter each component's size back to every pixel of the component.
    pid = tl.program_id(0)
    offsets = pid * BLOCK_SIZE + tl.arange(0, BLOCK_SIZE)
    mask = offsets < numel

    # Load the final labels
    labels = tl.load(labels_ptr + offsets, mask=mask, other=-1)
    valid_mask = mask & (labels != -1)

    # Look up the size for each label from the histogram
    component_sizes = tl.load(sizes_ptr + labels, mask=valid_mask, other=0)

    # Write the size to the final output tensor. Background pixels get size 0.
    tl.store(out_ptr + offsets, component_sizes, mask=mask)
394
+
395
+
396
def connected_components_triton(input_tensor: torch.Tensor):
    """
    Computes connected components labeling on a batch of 2D integer tensors using Triton.

    Args:
        input_tensor (torch.Tensor): A BxHxW integer tensor or Bx1xHxW. Non-zero values are considered foreground. Bool tensor also accepted

    Returns:
        Tuple[torch.Tensor, torch.Tensor]: A tuple containing:
            - An output tensor (same shape as the input) with component labels
              (global root linear index + 1); background is 0. Labels are
              unique per component but not consecutive.
            - A tensor (same shape as the input) with the size of the
              connected component for each pixel (0 for background).
    """
    assert (
        input_tensor.is_cuda and input_tensor.is_contiguous()
    ), "Input tensor must be a contiguous CUDA tensor."
    out_shape = input_tensor.shape
    if input_tensor.dim() == 4 and input_tensor.shape[1] == 1:
        input_tensor = input_tensor.squeeze(1)
    else:
        assert (
            input_tensor.dim() == 3
        ), "Input tensor must be (B, H, W) or (B, 1, H, W)."

    B, H, W = input_tensor.shape
    numel = B * H * W
    device = input_tensor.device

    # --- Allocate Tensors ---
    labels = torch.empty_like(input_tensor, dtype=torch.int32)
    output = torch.empty_like(input_tensor, dtype=torch.int32)

    # --- Phase 1 --- (assign each foreground pixel its linear index)
    BLOCK_SIZE = 256
    grid_init = (triton.cdiv(numel, BLOCK_SIZE),)
    _init_labels_kernel[grid_init](
        input_tensor,
        labels,
        numel,
        BLOCK_SIZE=BLOCK_SIZE,
    )

    # --- Phase 2 --- (union-find merges with 8-connected neighbors)
    grid_local_prop = lambda meta: (
        B,
        triton.cdiv(H, meta["BLOCK_SIZE_H"]) * triton.cdiv(W, meta["BLOCK_SIZE_W"]),
    )
    _local_prop_kernel[grid_local_prop](labels, input_tensor, H, W)

    # --- Phase 3 --- (pointer jumping so every pixel points to its root)
    BLOCK_SIZE = 256
    grid_jump = lambda meta: (triton.cdiv(numel, meta["BLOCK_SIZE"]),)
    _pointer_jump_kernel[grid_jump](labels, output, numel, BLOCK_SIZE=BLOCK_SIZE)

    # --- Phase 4 ---
    # Allocate tensor to store the final output sizes
    component_sizes_out = torch.empty_like(input_tensor, dtype=torch.int32)

    # Allocate a temporary 1D tensor to act as the histogram
    # Size is numel because labels can be up to numel-1
    sizes_histogram = torch.zeros(numel, dtype=torch.int32, device=device)

    # 4.1: Count the occurrences of each label
    grid_count = (triton.cdiv(numel, BLOCK_SIZE),)
    _count_labels_kernel[grid_count](
        output, sizes_histogram, numel, BLOCK_SIZE=BLOCK_SIZE
    )

    # 4.2: Broadcast the counts to the final output tensor
    grid_broadcast = (triton.cdiv(numel, BLOCK_SIZE),)
    _broadcast_sizes_kernel[grid_broadcast](
        output, sizes_histogram, component_sizes_out, numel, BLOCK_SIZE=BLOCK_SIZE
    )
    # +1 shifts the -1 background sentinel to 0 and all foreground roots to positive ids.
    return output.view(out_shape) + 1, component_sizes_out.view(out_shape)
source_code/sam3/sam3/sam/prompt_encoder.py ADDED
@@ -0,0 +1,243 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved
2
+
3
+ from typing import Any, Optional, Tuple, Type
4
+
5
+ import numpy as np
6
+ import torch
7
+ from torch import nn
8
+
9
+ from .common import LayerNorm2d
10
+
11
+
12
class PromptEncoder(nn.Module):
    def __init__(
        self,
        embed_dim: int,
        image_embedding_size: Tuple[int, int],
        input_image_size: Tuple[int, int],
        mask_in_chans: int,
        activation: Type[nn.Module] = nn.GELU,
    ) -> None:
        """
        Encodes point, box, and mask prompts into embeddings consumed by
        SAM's mask decoder.

        Arguments:
          embed_dim (int): The prompts' embedding dimension
          image_embedding_size (tuple(int, int)): The spatial size of the
            image embedding, as (H, W).
          input_image_size (int): The padded size of the image as input
            to the image encoder, as (H, W).
          mask_in_chans (int): The number of hidden channels used for
            encoding input masks.
          activation (nn.Module): The activation to use when encoding
            input masks.
        """
        super().__init__()
        self.embed_dim = embed_dim
        self.input_image_size = input_image_size
        self.image_embedding_size = image_embedding_size
        self.pe_layer = PositionEmbeddingRandom(embed_dim // 2)

        # One learned embedding per prompt role: negative point, positive
        # point, and the two box corners.
        self.num_point_embeddings: int = 4  # pos/neg point + 2 box corners
        self.point_embeddings = nn.ModuleList(
            nn.Embedding(1, embed_dim) for _ in range(self.num_point_embeddings)
        )
        self.not_a_point_embed = nn.Embedding(1, embed_dim)

        emb_h, emb_w = image_embedding_size
        self.mask_input_size = (4 * emb_h, 4 * emb_w)
        # Downscale a full-resolution input mask (4x the embedding size)
        # back to the image-embedding resolution via two stride-2 convs.
        self.mask_downscaling = nn.Sequential(
            nn.Conv2d(1, mask_in_chans // 4, kernel_size=2, stride=2),
            LayerNorm2d(mask_in_chans // 4),
            activation(),
            nn.Conv2d(mask_in_chans // 4, mask_in_chans, kernel_size=2, stride=2),
            LayerNorm2d(mask_in_chans),
            activation(),
            nn.Conv2d(mask_in_chans, embed_dim, kernel_size=1),
        )
        self.no_mask_embed = nn.Embedding(1, embed_dim)

    def get_dense_pe(self) -> torch.Tensor:
        """
        Returns the positional encoding used to encode point prompts,
        applied to a dense set of points the shape of the image encoding.

        Returns:
          torch.Tensor: Positional encoding with shape
            1x(embed_dim)x(embedding_h)x(embedding_w)
        """
        return self.pe_layer(self.image_embedding_size).unsqueeze(0)

    def _embed_points(
        self,
        points: torch.Tensor,
        labels: torch.Tensor,
        pad: bool,
    ) -> torch.Tensor:
        """Embeds point prompts, optionally appending a padding point."""
        points = points + 0.5  # shift coordinates to pixel centers
        if pad:
            pad_points = torch.zeros((points.shape[0], 1, 2), device=points.device)
            pad_labels = -torch.ones((labels.shape[0], 1), device=labels.device)
            points = torch.cat([points, pad_points], dim=1)
            labels = torch.cat([labels, pad_labels], dim=1)
        point_embedding = self.pe_layer.forward_with_coords(
            points, self.input_image_size
        )

        # Label -1 marks padding: its positional encoding is replaced
        # outright by the dedicated "not a point" embedding.
        point_embedding = torch.where(
            (labels == -1).unsqueeze(-1),
            torch.zeros_like(point_embedding) + self.not_a_point_embed.weight,
            point_embedding,
        )
        # Labels 0..3 each add their own learned type embedding on top of
        # the positional encoding.
        for label_value in range(self.num_point_embeddings):
            point_embedding = torch.where(
                (labels == label_value).unsqueeze(-1),
                point_embedding + self.point_embeddings[label_value].weight,
                point_embedding,
            )
        return point_embedding

    def _embed_boxes(self, boxes: torch.Tensor) -> torch.Tensor:
        """Embeds box prompts as a pair of encoded corner points."""
        corners = (boxes + 0.5).reshape(-1, 2, 2)  # shift to pixel centers
        corner_embedding = self.pe_layer.forward_with_coords(
            corners, self.input_image_size
        )
        # Corner roles reuse the two box-corner type embeddings.
        corner_embedding[:, 0, :] += self.point_embeddings[2].weight
        corner_embedding[:, 1, :] += self.point_embeddings[3].weight
        return corner_embedding

    def _embed_masks(self, masks: torch.Tensor) -> torch.Tensor:
        """Embeds mask inputs through the downscaling network."""
        return self.mask_downscaling(masks)

    def _get_batch_size(
        self,
        points: Optional[Tuple[torch.Tensor, torch.Tensor]],
        boxes: Optional[torch.Tensor],
        masks: Optional[torch.Tensor],
    ) -> int:
        """
        Gets the batch size of the output given the batch size of the input
        prompts; defaults to 1 when no prompt is provided.
        """
        if points is not None:
            return points[0].shape[0]
        if boxes is not None:
            return boxes.shape[0]
        if masks is not None:
            return masks.shape[0]
        return 1

    def _get_device(self) -> torch.device:
        """Device on which the learned prompt embeddings live."""
        return self.point_embeddings[0].weight.device

    def forward(
        self,
        points: Optional[Tuple[torch.Tensor, torch.Tensor]],
        boxes: Optional[torch.Tensor],
        masks: Optional[torch.Tensor],
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Embeds different types of prompts, returning both sparse and dense
        embeddings.

        Arguments:
          points (tuple(torch.Tensor, torch.Tensor) or none): point coordinates
            and labels to embed.
          boxes (torch.Tensor or none): boxes to embed
          masks (torch.Tensor or none): masks to embed

        Returns:
          torch.Tensor: sparse embeddings for the points and boxes, with shape
            BxNx(embed_dim), where N is determined by the number of input
            points and boxes.
          torch.Tensor: dense embeddings for the masks, in the shape
            Bx(embed_dim)x(embed_H)x(embed_W)
        """
        bs = self._get_batch_size(points, boxes, masks)
        # Start from an empty (B, 0, C) tensor so concatenation works even
        # when neither points nor boxes are given.
        sparse_embeddings = torch.empty(
            (bs, 0, self.embed_dim), device=self._get_device()
        )
        if points is not None:
            coords, labels = points
            point_emb = self._embed_points(coords, labels, pad=(boxes is None))
            sparse_embeddings = torch.cat([sparse_embeddings, point_emb], dim=1)
        if boxes is not None:
            box_emb = self._embed_boxes(boxes)
            sparse_embeddings = torch.cat([sparse_embeddings, box_emb], dim=1)

        if masks is not None:
            dense_embeddings = self._embed_masks(masks)
        else:
            # Broadcast the learned "no mask" embedding over the full
            # image-embedding grid.
            dense_embeddings = self.no_mask_embed.weight.reshape(1, -1, 1, 1).expand(
                bs, -1, self.image_embedding_size[0], self.image_embedding_size[1]
            )

        return sparse_embeddings, dense_embeddings
198
+
199
+
200
class PositionEmbeddingRandom(nn.Module):
    """
    Positional encoding based on random spatial (Fourier) frequencies.
    """

    def __init__(self, num_pos_feats: int = 64, scale: Optional[float] = None) -> None:
        super().__init__()
        # Non-positive / missing scale falls back to unit variance.
        if scale is None or scale <= 0.0:
            scale = 1.0
        self.register_buffer(
            "positional_encoding_gaussian_matrix",
            scale * torch.randn((2, num_pos_feats)),
        )

    def _pe_encoding(self, coords: torch.Tensor) -> torch.Tensor:
        """Positionally encode points that are normalized to [0,1]."""
        # coords live in the [0, 1]^2 square with shape d_1 x ... x d_n x 2;
        # map to [-1, 1], project through the random matrix, scale by 2*pi.
        projected = (2 * coords - 1) @ self.positional_encoding_gaussian_matrix
        projected = 2 * np.pi * projected
        # Output has shape d_1 x ... x d_n x C.
        return torch.cat([torch.sin(projected), torch.cos(projected)], dim=-1)

    def forward(self, size: Tuple[int, int]) -> torch.Tensor:
        """Generate positional encoding for a grid of the specified size."""
        height, width = size
        device: Any = self.positional_encoding_gaussian_matrix.device
        ones = torch.ones((height, width), device=device, dtype=torch.float32)
        # Cumulative sums give 0.5, 1.5, ... — i.e. pixel-center coordinates —
        # which are then normalized to [0, 1].
        y_centers = (ones.cumsum(dim=0) - 0.5) / height
        x_centers = (ones.cumsum(dim=1) - 0.5) / width

        encoded = self._pe_encoding(torch.stack([x_centers, y_centers], dim=-1))
        return encoded.permute(2, 0, 1)  # C x H x W

    def forward_with_coords(
        self, coords_input: torch.Tensor, image_size: Tuple[int, int]
    ) -> torch.Tensor:
        """Positionally encode points that are not normalized to [0,1]."""
        normalized = coords_input.clone()
        normalized[:, :, 0] = normalized[:, :, 0] / image_size[1]
        normalized[:, :, 1] = normalized[:, :, 1] / image_size[0]
        return self._pe_encoding(normalized.to(torch.float))  # B x N x C
source_code/sam3/sam3/train/configs/eval_base.yaml ADDED
@@ -0,0 +1,279 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # @package _global_
2
+ defaults:
3
+ - _self_
4
+
5
+ # This config is the base configuration for all evaluations. Amongst other things, it defines:
6
+ # - the model
7
+ # - the image transforms
8
+ # - the post processors
9
+ # - cluster configuration (only relevant for slurm-based evals, ignored otherwise)
10
+ #
11
+ # Most of the parameters should be kept as-is. The main modifications you may want to make are:
12
+ # - the cluster configuration, to adjust partitions/qos to your system
13
+ # - the flag gather_pred_via_filesys if your RAM is tight
14
+ # - num_val_workers if your number of cores is small (should be roughly number of cores / number of gpus)
15
+ # - the paths below
16
+
17
+
18
+ # ============================================================================
19
+ # Paths Configuration (Change this to your own paths)
20
+ # ============================================================================
21
+ paths:
22
+ # If you leave the checkpoint path to null, the model will be downloaded from hugging-face. Otherwise provide a path
23
+ checkpoint_path: null
24
+ # the experiments will be subfolders of this
25
+ base_experiment_log_dir: <YOUR EXPERIMENT LOG_DIR>
26
+
27
+ # base path to the annotation folder for gold (refer to the readmes on how to download)
28
+ base_annotation_path: <YOUR_GOLD_GT_DIR>
29
+
30
+ # base path to the annotation folder for silver (refer to the readmes on how to download)
31
+ base_annotation_path_silver: <YOUR_SILVER_GT_DIR>
32
+
33
+ # path to the metaclip images, used for SA-Co gold (refer to the readme for instructions). Can be null if you don't intend on evaluating on this dataset.
34
+ metaclip_img_path: <YOUR_METACLIP_IMG_DIR>
35
+
36
+ # path to the sa1b images, used for SA-Co gold (refer to the readme for instructions). Can be null if you don't intend on evaluating on this dataset.
37
+ sa1b_img_path: <YOUR_SA1B_IMG_DIR>
38
+
39
+ # path to the SA-Co/silver images
40
+ silver_img_path: <YOUR_SILVER_IMG_DIR>
41
+
42
+ bpe_path: <BPE_PATH> # This should be under assets/bpe_simple_vocab_16e6.txt.gz
43
+
44
+
45
+ # ============================================================================
46
+ # Different helper parameters and functions
47
+ # ============================================================================
48
+ scratch:
49
+
50
+ use_presence_eval: True
51
+
52
+ base_val_transform:
53
+ - _target_: sam3.train.transforms.basic_for_api.ComposeAPI
54
+ transforms:
55
+ ######## transforms for validation (begin) ########
56
+ - _target_: sam3.train.transforms.basic_for_api.RandomResizeAPI
57
+ sizes: ${scratch.resolution} # originally `resolution: 1024`
58
+ max_size:
59
+ _target_: sam3.train.transforms.basic.get_random_resize_max_size
60
+ size: ${scratch.resolution} # originally `resolution: 1024`
61
+ square: true
62
+ consistent_transform: False
63
+ ######## transforms for validation (end) ########
64
+ - _target_: sam3.train.transforms.basic_for_api.ToTensorAPI
65
+ - _target_: sam3.train.transforms.basic_for_api.NormalizeAPI
66
+ mean: ${scratch.val_norm_mean}
67
+ std: ${scratch.val_norm_std}
68
+
69
+ loss: null
70
+
71
+ # Model parameters
72
+ d_model: 256
73
+ input_box_embedding_dim: ${add:${scratch.d_model},2}
74
+
75
+ # Box processing
76
+ original_box_postprocessor:
77
+ _target_: sam3.eval.postprocessors.PostProcessImage
78
+ max_dets_per_img: -1 # infinite detections
79
+ use_original_ids: true
80
+ use_original_sizes_box: true
81
+ use_presence: ${scratch.use_presence_eval}
82
+
83
+ box_postprocessor:
84
+ _target_: sam3.eval.postprocessors.PostProcessImage
85
+ max_dets_per_img: -1 #infinite detections
86
+ use_original_ids: false
87
+ use_original_sizes_box: false
88
+ use_presence: ${scratch.use_presence_eval}
89
+
90
+ box_postprocessor_thresholded:
91
+ _target_: sam3.eval.postprocessors.PostProcessImage
92
+ max_dets_per_img: -1 #infinite detections
93
+ use_original_ids: false
94
+ use_original_sizes_box: false
95
+ detection_threshold: 0.3
96
+ use_presence: ${scratch.use_presence_eval}
97
+
98
+ mask_postprocessor_thresholded:
99
+ _target_: sam3.eval.postprocessors.PostProcessImage
100
+ max_dets_per_img: -1 #infinite detections
101
+ iou_type: "segm"
102
+ use_original_ids: false
103
+ use_original_sizes_box: false
104
+ use_original_sizes_mask: true
105
+ convert_mask_to_rle: True
106
+ detection_threshold: 0.3
107
+ use_presence: ${scratch.use_presence_eval}
108
+
109
+ # Image processing parameters
110
+ resolution: 1008
111
+ max_ann_per_img: 200
112
+
113
+ # Normalization parameters
114
+ train_norm_mean: [0.5, 0.5, 0.5]
115
+ train_norm_std: [0.5, 0.5, 0.5]
116
+ val_norm_mean: [0.5, 0.5, 0.5]
117
+ val_norm_std: [0.5, 0.5, 0.5]
118
+
119
+ # Training parameters
120
+ train_batch_size: 1
121
+ val_batch_size: 1
122
+ num_train_workers: 0
123
+ num_val_workers: 10 # change this depending on the number of cpu cores available
124
+ max_data_epochs: 20
125
+ target_epoch_size: 1500
126
+ hybrid_repeats: 1
127
+ context_length: 2
128
+
129
+ # All reduce - this controls how the predictions are sent back to node 0.
130
+ # If you have a lot of ram, CPU gather is faster. Otherwise, we provide a fallback through filesystem (eg NFS)
131
+ # Switch to true if you get cpu ooms during gather.
132
+ gather_pred_via_filesys: false
133
+
134
+ # Learning rate and scheduler parameters (unused for eval)
135
+ lr_scale: 0.1
136
+ lr_transformer: ${times:8e-4,${scratch.lr_scale}}
137
+ lr_vision_backbone: ${times:2.5e-4,${scratch.lr_scale}}
138
+ lr_language_backbone: ${times:5e-5,${scratch.lr_scale}}
139
+ lrd_vision_backbone: 0.9 # (lower for in-domain and higher for ood)
140
+ wd: 0.1
141
+ scheduler_timescale: 20
142
+ scheduler_warmup: 20
143
+ scheduler_cooldown: 20
144
+
145
+
146
+ # ============================================================================
147
+ # Trainer Configuration
148
+ # ============================================================================
149
+
150
+ trainer:
151
+ _target_: sam3.train.trainer.Trainer
152
+ skip_saving_ckpts: true
153
+ empty_gpu_mem_cache_after_eval: True
154
+ skip_first_val: True
155
+ max_epochs: ${scratch.max_data_epochs}
156
+ accelerator: cuda
157
+ seed_value: 123
158
+ val_epoch_freq: 10
159
+ mode: val
160
+
161
+ distributed:
162
+ backend: nccl
163
+ find_unused_parameters: True
164
+ gradient_as_bucket_view: True
165
+
166
+ loss:
167
+ all:
168
+ _target_: sam3.train.loss.sam3_loss.DummyLoss
169
+ default:
170
+ _target_: sam3.train.loss.sam3_loss.DummyLoss
171
+
172
+ data:
173
+ train: null
174
+ val: null
175
+
176
+ model:
177
+ _target_: sam3.model_builder.build_sam3_image_model
178
+ bpe_path: ${paths.bpe_path}
179
+ device: cpus
180
+ eval_mode: true
181
+ enable_segmentation: true # Warning: Enable this if using segmentation.
182
+ checkpoint_path: ${paths.checkpoint_path}
183
+
184
+ meters:
185
+ val: null
186
+
187
+ optim:
188
+ amp:
189
+ enabled: True
190
+ amp_dtype: bfloat16
191
+
192
+ optimizer:
193
+ _target_: torch.optim.AdamW
194
+
195
+ gradient_clip:
196
+ _target_: sam3.train.optim.optimizer.GradientClipper
197
+ max_norm: 0.1
198
+ norm_type: 2
199
+
200
+ param_group_modifiers:
201
+ - _target_: sam3.train.optim.optimizer.layer_decay_param_modifier
202
+ _partial_: True
203
+ layer_decay_value: ${scratch.lrd_vision_backbone}
204
+ apply_to: 'backbone.vision_backbone.trunk'
205
+ overrides:
206
+ - pattern: '*pos_embed*'
207
+ value: 1.0
208
+
209
+ options:
210
+ lr:
211
+ - scheduler: # transformer and class_embed
212
+ _target_: sam3.train.optim.schedulers.InverseSquareRootParamScheduler
213
+ base_lr: ${scratch.lr_transformer}
214
+ timescale: ${scratch.scheduler_timescale}
215
+ warmup_steps: ${scratch.scheduler_warmup}
216
+ cooldown_steps: ${scratch.scheduler_cooldown}
217
+ - scheduler:
218
+ _target_: sam3.train.optim.schedulers.InverseSquareRootParamScheduler
219
+ base_lr: ${scratch.lr_vision_backbone}
220
+ timescale: ${scratch.scheduler_timescale}
221
+ warmup_steps: ${scratch.scheduler_warmup}
222
+ cooldown_steps: ${scratch.scheduler_cooldown}
223
+ param_names:
224
+ - 'backbone.vision_backbone.*'
225
+ - scheduler:
226
+ _target_: sam3.train.optim.schedulers.InverseSquareRootParamScheduler
227
+ base_lr: ${scratch.lr_language_backbone}
228
+ timescale: ${scratch.scheduler_timescale}
229
+ warmup_steps: ${scratch.scheduler_warmup}
230
+ cooldown_steps: ${scratch.scheduler_cooldown}
231
+ param_names:
232
+ - 'backbone.language_backbone.*'
233
+
234
+ weight_decay:
235
+ - scheduler:
236
+ _target_: fvcore.common.param_scheduler.ConstantParamScheduler
237
+ value: ${scratch.wd}
238
+ - scheduler:
239
+ _target_: fvcore.common.param_scheduler.ConstantParamScheduler
240
+ value: 0.0
241
+ param_names:
242
+ - '*bias*'
243
+ module_cls_names: ['torch.nn.LayerNorm']
244
+
245
+ checkpoint:
246
+ save_dir: ${launcher.experiment_log_dir}/checkpoints
247
+ save_freq: 0 # 0 only last checkpoint is saved.
248
+
249
+
250
+ logging:
251
+ tensorboard_writer:
252
+ _target_: sam3.train.utils.logger.make_tensorboard_logger
253
+ log_dir: ${launcher.experiment_log_dir}/tensorboard
254
+ flush_secs: 120
255
+ should_log: True
256
+ wandb_writer: null
257
+ log_dir: ${launcher.experiment_log_dir}/logs/
258
+ log_freq: 10
259
+
260
+ # ============================================================================
261
+ # Launcher and Submitit Configuration
262
+ # ============================================================================
263
+
264
+ launcher:
265
+ num_nodes: 4
266
+ gpus_per_node: 8
267
+ experiment_log_dir: ${paths.experiment_log_dir}
268
+ multiprocessing_context: forkserver
269
+
270
+
271
+ submitit:
272
+ account: null # Add your SLURM account if use_cluster == 1
273
+ partition: null
274
+ qos: null # Add your QoS if use_cluster == 1
275
+ timeout_hour: 72
276
+ use_cluster: True
277
+ cpus_per_task: 10
278
+ port_range: [10000, 65000]
279
+ constraint: null
source_code/sam3/sam3/train/configs/gold_image_evals/sam3_gold_image_attributes.yaml ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # @package _global_
2
+ defaults:
3
+ - /configs/eval_base.yaml
4
+ - _self_
5
+
6
+ # ============================================================================
7
+ # Paths Configuration (you can override here, but it shouldn't require further changes if eval_base.yaml is correct)
8
+ # ============================================================================
9
+ paths:
10
+ experiment_log_dir: ${paths.base_experiment_log_dir}/gold_attributes/
11
+ coco_gt: ${paths.base_annotation_path}/gold_attributes_merged_a_release_test.json
12
+ coco_gts:
13
+ - ${paths.base_annotation_path}/gold_attributes_merged_a_release_test.json
14
+ - ${paths.base_annotation_path}/gold_attributes_merged_b_release_test.json
15
+ - ${paths.base_annotation_path}/gold_attributes_merged_c_release_test.json
16
+
17
+
18
+ # ============================================================================
19
+ # Trainer Configuration
20
+ # ============================================================================
21
+
22
+ trainer:
23
+ data:
24
+ val:
25
+ _target_: sam3.train.data.torch_dataset.TorchDataset
26
+ dataset:
27
+ _target_: sam3.train.data.sam3_image_dataset.Sam3ImageDataset
28
+ coco_json_loader:
29
+ _target_: sam3.train.data.coco_json_loaders.SAM3_EVAL_API_FROM_JSON_NP
30
+ _partial_: true
31
+ img_folder: ${paths.metaclip_img_path}
32
+ ann_file: ${paths.coco_gt}
33
+ transforms: ${scratch.base_val_transform}
34
+ max_ann_per_img: 100000
35
+ multiplier: 1
36
+ training: false
37
+
38
+ shuffle: False
39
+ batch_size: ${scratch.val_batch_size}
40
+ num_workers: ${scratch.num_val_workers}
41
+ pin_memory: False
42
+ drop_last: False
43
+ collate_fn:
44
+ _target_: sam3.train.data.collator.collate_fn_api
45
+ _partial_: true
46
+ repeats: ${scratch.hybrid_repeats}
47
+ dict_key: gold_attributes
48
+
49
+ meters:
50
+ val:
51
+ gold_attributes: # this key matches the "dict_key" in the dataloader's collate function
52
+ cgf1:
53
+ _target_: sam3.eval.coco_writer.PredictionDumper
54
+ iou_type: "segm"
55
+ dump_dir: ${launcher.experiment_log_dir}/dumps/gold_attributes
56
+ merge_predictions: True
57
+ postprocessor: ${scratch.mask_postprocessor_thresholded}
58
+ gather_pred_via_filesys: ${scratch.gather_pred_via_filesys}
59
+ maxdets: 1000000 # no limit
60
+ pred_file_evaluators:
61
+ - _target_: sam3.eval.cgf1_eval.CGF1Evaluator
62
+ gt_path: ${paths.coco_gts}
63
+ iou_type: "bbox"
64
+ - _target_: sam3.eval.cgf1_eval.CGF1Evaluator
65
+ gt_path: ${paths.coco_gts}
66
+ iou_type: "segm"
source_code/sam3/sam3/train/configs/gold_image_evals/sam3_gold_image_metaclip_nps.yaml ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # @package _global_
2
+ defaults:
3
+ - /configs/eval_base.yaml
4
+ - _self_
5
+
6
+ # ============================================================================
7
+ # Paths Configuration (you can override here, but it shouldn't require further changes if eval_base.yaml is correct)
8
+ # ============================================================================
9
+ paths:
10
+ experiment_log_dir: ${paths.base_experiment_log_dir}/gold_metaclip_nps/
11
+ coco_gt: ${paths.base_annotation_path}/gold_metaclip_merged_a_release_test.json
12
+ coco_gts:
13
+ - ${paths.base_annotation_path}/gold_metaclip_merged_a_release_test.json
14
+ - ${paths.base_annotation_path}/gold_metaclip_merged_b_release_test.json
15
+ - ${paths.base_annotation_path}/gold_metaclip_merged_c_release_test.json
16
+
17
+
18
+ # ============================================================================
19
+ # Trainer Configuration
20
+ # ============================================================================
21
+
22
+ trainer:
23
+ data:
24
+ val:
25
+ _target_: sam3.train.data.torch_dataset.TorchDataset
26
+ dataset:
27
+ _target_: sam3.train.data.sam3_image_dataset.Sam3ImageDataset
28
+ coco_json_loader:
29
+ _target_: sam3.train.data.coco_json_loaders.SAM3_EVAL_API_FROM_JSON_NP
30
+ _partial_: true
31
+ img_folder: ${paths.metaclip_img_path}
32
+ ann_file: ${paths.coco_gt}
33
+ transforms: ${scratch.base_val_transform}
34
+ max_ann_per_img: 100000
35
+ multiplier: 1
36
+ training: false
37
+
38
+ shuffle: False
39
+ batch_size: ${scratch.val_batch_size}
40
+ num_workers: ${scratch.num_val_workers}
41
+ pin_memory: False
42
+ drop_last: False
43
+ collate_fn:
44
+ _target_: sam3.train.data.collator.collate_fn_api
45
+ _partial_: true
46
+ repeats: ${scratch.hybrid_repeats}
47
+ dict_key: gold_metaclip_nps
48
+
49
+ meters:
50
+ val:
51
+ gold_metaclip_nps: # this key matches the "dict_key" in the dataloader's collate function
52
+ cgf1:
53
+ _target_: sam3.eval.coco_writer.PredictionDumper
54
+ iou_type: "segm"
55
+ dump_dir: ${launcher.experiment_log_dir}/dumps/gold_metaclip_nps
56
+ merge_predictions: True
57
+ postprocessor: ${scratch.mask_postprocessor_thresholded}
58
+ gather_pred_via_filesys: ${scratch.gather_pred_via_filesys}
59
+ maxdets: 1000000 # no limit
60
+ pred_file_evaluators:
61
+ - _target_: sam3.eval.cgf1_eval.CGF1Evaluator
62
+ gt_path: ${paths.coco_gts}
63
+ iou_type: "bbox"
64
+ - _target_: sam3.eval.cgf1_eval.CGF1Evaluator
65
+ gt_path: ${paths.coco_gts}
66
+ iou_type: "segm"
source_code/sam3/sam3/train/configs/odinw13/odinw_text_only_positive.yaml ADDED
@@ -0,0 +1,253 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # @package _global_
2
+ defaults:
3
+ - _self_
4
+
5
+ # ============================================================================
6
+ # Paths Configuration (Change this to your own paths)
7
+ # ============================================================================
8
+ # python sam3/train/train.py -c configs/odinw_text_only.yaml --use-cluster 1 --partition ${PARTITION} --account ${ACCOUNT} --qos ${QoS}
9
+
10
+ paths:
11
+ odinw_data_root: <YOUR_DATA_DIR>
12
+ experiment_log_dir: <YOUR EXPERIMENT LOG_DIR>
13
+ bpe_path: <BPE_PATH> # This should be under assets/bpe_simple_vocab_16e6.txt.gz
14
+
15
+
16
+ supercategory_tuple: ${all_odinw_supercategories.${string:${submitit.job_array.task_index}}}
17
+ # Validation transforms pipeline
18
+ val_transforms:
19
+ - _target_: sam3.train.transforms.basic_for_api.ComposeAPI
20
+ transforms:
21
+ - _target_: sam3.train.transforms.basic_for_api.RandomResizeAPI
22
+ sizes: ${scratch.resolution}
23
+ max_size:
24
+ _target_: sam3.train.transforms.basic.get_random_resize_max_size
25
+ size: ${scratch.resolution}
26
+ square: true
27
+ consistent_transform: False
28
+ - _target_: sam3.train.transforms.basic_for_api.ToTensorAPI
29
+ - _target_: sam3.train.transforms.basic_for_api.NormalizeAPI
30
+ mean: ${scratch.val_norm_mean}
31
+ std: ${scratch.val_norm_std}
32
+
33
+ # ============================================================================
34
+ # Different helper parameters and functions
35
+ # ============================================================================
36
+ scratch:
37
+ enable_segmentation: True
38
+ # Box processing
39
+ use_presence_eval: True
40
+ original_box_postprocessor:
41
+ _target_: sam3.eval.postprocessors.PostProcessImage
42
+ max_dets_per_img: -1 # infinite detections
43
+ use_original_ids: true
44
+ use_original_sizes_box: true
45
+ use_presence: ${scratch.use_presence_eval}
46
+
47
+ # Image processing parameters
48
+ resolution: 1008
49
+ # Normalization parameters
50
+ val_norm_mean: [0.5, 0.5, 0.5]
51
+ val_norm_std: [0.5, 0.5, 0.5]
52
+
53
+ # Training parameters
54
+ val_batch_size: 2
55
+ num_val_workers: 0
56
+ gather_pred_via_filesys: false
57
+
58
+ # ============================================================================
59
+ # Trainer Configuration
60
+ # ============================================================================
61
+
62
+ trainer:
63
+ _target_: sam3.train.trainer.Trainer
64
+ skip_saving_ckpts: true
65
+ empty_gpu_mem_cache_after_eval: True
66
+ max_epochs: 1
67
+ accelerator: cuda
68
+ seed_value: 123
69
+ mode: val
70
+
71
+ distributed:
72
+ backend: nccl
73
+ find_unused_parameters: True
74
+ gradient_as_bucket_view: True
75
+
76
+ loss:
77
+ default:
78
+ _target_: sam3.train.loss.sam3_loss.DummyLoss
79
+
80
+ data:
81
+ val:
82
+ _target_: sam3.train.data.torch_dataset.TorchDataset
83
+ dataset:
84
+ _target_: sam3.train.data.sam3_image_dataset.Sam3ImageDataset
85
+ coco_json_loader:
86
+ _target_: sam3.train.data.coco_json_loaders.COCO_FROM_JSON
87
+ prompts: ${odinw35_prompts.${supercategory_tuple.name}}
88
+ include_negatives: true
89
+ category_chunk_size: 20 # Note: Since we are doing AP +ve we need to include all categories!
90
+ _partial_: true
91
+ img_folder: ${paths.odinw_data_root}/${supercategory_tuple.val.img_folder}
92
+ ann_file:
93
+ _target_: sam3.eval.coco_reindex.reindex_coco_to_temp
94
+ input_json_path: ${paths.odinw_data_root}/${supercategory_tuple.val.json}
95
+ transforms: ${val_transforms}
96
+ max_ann_per_img: 100000
97
+ multiplier: 1
98
+ training: false
99
+
100
+ shuffle: False
101
+ batch_size: ${scratch.val_batch_size}
102
+ num_workers: ${scratch.num_val_workers}
103
+ pin_memory: False
104
+ drop_last: False
105
+ collate_fn:
106
+ _target_: sam3.train.data.collator.collate_fn_api
107
+ _partial_: true
108
+ repeats: 1
109
+ dict_key: odinw35
110
+
111
+ model:
112
+ _target_: sam3.model_builder.build_sam3_image_model
113
+ bpe_path: ${paths.bpe_path}
114
+ device: cpus
115
+ eval_mode: true # Set to false if training
116
+ enable_segmentation: ${scratch.enable_segmentation} # Warning: Enable this if using segmentation.
117
+
118
+ meters:
119
+ val:
120
+ odinw35:
121
+ detection:
122
+ _target_: sam3.eval.coco_writer.PredictionDumper
123
+ iou_type: "bbox"
124
+ dump_dir: ${launcher.experiment_log_dir}/dumps/roboflow/${supercategory_tuple.name}
125
+ merge_predictions: True
126
+ postprocessor: ${scratch.original_box_postprocessor}
127
+ gather_pred_via_filesys: ${scratch.gather_pred_via_filesys}
128
+ maxdets: 100
129
+ pred_file_evaluators:
130
+ - _target_: sam3.eval.coco_eval_offline.CocoEvaluatorOfflineWithPredFileEvaluators
131
+ gt_path:
132
+ _target_: sam3.eval.coco_reindex.reindex_coco_to_temp
133
+ input_json_path: ${paths.odinw_data_root}/${supercategory_tuple.val.json}
134
+ tide: False
135
+ iou_type: "bbox"
136
+ positive_split: true
137
+
138
+ checkpoint:
139
+ save_dir: ${launcher.experiment_log_dir}/checkpoints
140
+ save_freq: 0 # 0 only last checkpoint is saved.
141
+
142
+
143
+ logging:
144
+ tensorboard_writer:
145
+ _target_: sam3.train.utils.logger.make_tensorboard_logger
146
+ log_dir: ${launcher.experiment_log_dir}/tensorboard
147
+ flush_secs: 120
148
+ should_log: True
149
+ wandb_writer: null
150
+ log_dir: ${launcher.experiment_log_dir}/logs/${supercategory_tuple.name}
151
+ log_freq: 10
152
+
153
+ # ============================================================================
154
+ # Launcher and Submitit Configuration
155
+ # ============================================================================
156
+
157
+ launcher:
158
+ num_nodes: 1
159
+ gpus_per_node: 2
160
+ experiment_log_dir: ${paths.experiment_log_dir}
161
+ multiprocessing_context: forkserver
162
+
163
+ submitit:
164
+ account: null
165
+ partition: null
166
+ qos: null
167
+ timeout_hour: 72
168
+ use_cluster: True
169
+ cpus_per_task: 10
170
+ port_range: [10000, 65000]
171
+ constraint: null
172
+
173
+ job_array:
174
+ num_tasks: 13
175
+ task_index: 0
176
+
177
+ # ============================================================================
178
+ # ODinW13 Supercategories
179
+ # ============================================================================
180
+
181
+ all_odinw_supercategories:
182
+ - name: AerialMaritimeDrone_large
183
+ val:
184
+ img_folder: AerialMaritimeDrone/large/test/
185
+ json: AerialMaritimeDrone/large/test/annotations_without_background.json
186
+ - name: Aquarium
187
+ val:
188
+ img_folder: Aquarium/Aquarium Combined.v2-raw-1024.coco/test/
189
+ json: Aquarium/Aquarium Combined.v2-raw-1024.coco/test/annotations_without_background.json
190
+ - name: CottontailRabbits
191
+ val:
192
+ img_folder: CottontailRabbits/test/
193
+ json: CottontailRabbits/test/annotations_without_background.json
194
+ - name: EgoHands_generic
195
+ val:
196
+ img_folder: EgoHands/generic/test/
197
+ json: EgoHands/generic/test/annotations_without_background.json
198
+ - name: NorthAmericaMushrooms
199
+ val:
200
+ img_folder: NorthAmericaMushrooms/North American Mushrooms.v1-416x416.coco/test/
201
+ json: NorthAmericaMushrooms/North American Mushrooms.v1-416x416.coco/test/annotations_without_background.json
202
+ - name: Packages
203
+ val:
204
+ img_folder: Packages/Raw/test/
205
+ json: Packages/Raw/test/annotations_without_background.json
206
+ - name: PascalVOC
207
+ val:
208
+ img_folder: PascalVOC/valid/
209
+ json: PascalVOC/valid/annotations_without_background.json
210
+ - name: Raccoon
211
+ val:
212
+ img_folder: Raccoon/Raccoon.v2-raw.coco/test/
213
+ json: Raccoon/Raccoon.v2-raw.coco/test/annotations_without_background.json
214
+ - name: ShellfishOpenImages
215
+ val:
216
+ img_folder: ShellfishOpenImages/raw/test/
217
+ json: ShellfishOpenImages/raw/test/annotations_without_background.json
218
+ - name: VehiclesOpenImages
219
+ val:
220
+ img_folder: VehiclesOpenImages/416x416/test/
221
+ json: VehiclesOpenImages/416x416/test/annotations_without_background.json
222
+ - name: pistols
223
+ val:
224
+ img_folder: pistols/export/
225
+ json: pistols/export/test_annotations_without_background.json
226
+ - name: pothole
227
+ val:
228
+ img_folder: pothole/test/
229
+ json: pothole/test/annotations_without_background.json
230
+ - name: thermalDogsAndPeople
231
+ val:
232
+ img_folder: thermalDogsAndPeople/test/
233
+ json: thermalDogsAndPeople/test/annotations_without_background.json
234
+
235
+
236
+ odinw35_prompts:
237
+ AerialMaritimeDrone_large: '[{"id": 1, "name": "boat", "supercategory": "movable-objects"},
238
+ {"id": 2, "name": "car", "supercategory": "movable-objects"}, {"id": 3, "name": "dock",
239
+ "supercategory": "movable-objects"}, {"id": 4, "name": "jet ski", "supercategory": "movable-objects"},
240
+ {"id": 5, "name": "boat lift", "supercategory": "movable-objects"}]'
241
+ Aquarium: null
242
+ CottontailRabbits: null
243
+ EgoHands_generic: null
244
+ NorthAmericaMushrooms: '[{''id'': 1, ''name'':
245
+ ''chicken of the woods'', ''supercategory'': ''mushroom''}, {''id'': 2, ''name'': ''chanterelle'', ''supercategory'': ''mushroom''}]'
246
+ Packages: null
247
+ PascalVOC: null
248
+ Raccoon: null
249
+ ShellfishOpenImages: null
250
+ VehiclesOpenImages: null
251
+ pistols: null
252
+ pothole: null
253
+ thermalDogsAndPeople: null
source_code/sam3/sam3/train/configs/odinw13/odinw_visual_only.yaml ADDED
@@ -0,0 +1,256 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # @package _global_
2
+ defaults:
3
+ - _self_
4
+
5
+ # ============================================================================
6
+ # Paths Configuration (Change this to your own paths)
7
+ # ============================================================================
8
+ # python sam3/train/train.py -c configs/odinw13/odinw_visual_only.yaml --use-cluster 1 --partition ${PARTITION} --account ${ACCOUNT} --qos ${QoS}
9
+
10
+ paths:
11
+ odinw_data_root: <YOUR_DATA_DIR>
12
+ experiment_log_dir: <YOUR EXPERIMENT LOG_DIR>
13
+ bpe_path: <BPE_PATH> # This should be under assets/bpe_simple_vocab_16e6.txt.gz
14
+
15
+
16
+ supercategory_tuple: ${all_odinw_supercategories.${string:${submitit.job_array.task_index}}}
17
+ # Validation transforms pipeline
18
+ val_transforms:
19
+ - _target_: sam3.train.transforms.basic_for_api.ComposeAPI
20
+ transforms:
21
+ - _target_: sam3.train.transforms.basic_for_api.RandomResizeAPI
22
+ sizes: ${scratch.resolution}
23
+ max_size:
24
+ _target_: sam3.train.transforms.basic.get_random_resize_max_size
25
+ size: ${scratch.resolution}
26
+ square: true
27
+ consistent_transform: False
28
+ - _target_: sam3.train.transforms.basic_for_api.ToTensorAPI
29
+ - _target_: sam3.train.transforms.basic_for_api.NormalizeAPI
30
+ mean: ${scratch.val_norm_mean}
31
+ std: ${scratch.val_norm_std}
32
+ - _target_: sam3.train.transforms.filter_query_transforms.TextQueryToVisual
33
+ keep_text_queries: false # Note: set this to false if you only want visual
34
+ probability: 1.0 # always
35
+
36
+ # ============================================================================
37
+ # Different helper parameters and functions
38
+ # ============================================================================
39
+ scratch:
40
+ enable_segmentation: True
41
+ # Box processing
42
+ use_presence_eval: True
43
+ original_box_postprocessor:
44
+ _target_: sam3.eval.postprocessors.PostProcessImage
45
+ max_dets_per_img: -1 # infinite detections
46
+ use_original_ids: true
47
+ use_original_sizes_box: true
48
+ use_presence: ${scratch.use_presence_eval}
49
+
50
+ # Image processing parameters
51
+ resolution: 1008
52
+ # Normalization parameters
53
+ val_norm_mean: [0.5, 0.5, 0.5]
54
+ val_norm_std: [0.5, 0.5, 0.5]
55
+
56
+ # Training parameters
57
+ val_batch_size: 2
58
+ num_val_workers: 0
59
+ gather_pred_via_filesys: false
60
+
61
+ # ============================================================================
62
+ # Trainer Configuration
63
+ # ============================================================================
64
+
65
+ trainer:
66
+ _target_: sam3.train.trainer.Trainer
67
+ skip_saving_ckpts: true
68
+ empty_gpu_mem_cache_after_eval: True
69
+ max_epochs: 1
70
+ accelerator: cuda
71
+ seed_value: 123
72
+ mode: val
73
+
74
+ distributed:
75
+ backend: nccl
76
+ find_unused_parameters: True
77
+ gradient_as_bucket_view: True
78
+
79
+ loss:
80
+ default:
81
+ _target_: sam3.train.loss.sam3_loss.DummyLoss
82
+
83
+ data:
84
+ val:
85
+ _target_: sam3.train.data.torch_dataset.TorchDataset
86
+ dataset:
87
+ _target_: sam3.train.data.sam3_image_dataset.Sam3ImageDataset
88
+ coco_json_loader:
89
+ _target_: sam3.train.data.coco_json_loaders.COCO_FROM_JSON
90
+ prompts: ${odinw35_prompts.${supercategory_tuple.name}}
91
+ include_negatives: true
92
+ category_chunk_size: 20 # Note: Since we are doing AP +ve we need to include all categories!
93
+ _partial_: true
94
+ img_folder: ${paths.odinw_data_root}/${supercategory_tuple.val.img_folder}
95
+ ann_file:
96
+ _target_: sam3.eval.coco_reindex.reindex_coco_to_temp
97
+ input_json_path: ${paths.odinw_data_root}/${supercategory_tuple.val.json}
98
+ transforms: ${val_transforms}
99
+ max_ann_per_img: 100000
100
+ multiplier: 1
101
+ training: false
102
+
103
+ shuffle: False
104
+ batch_size: ${scratch.val_batch_size}
105
+ num_workers: ${scratch.num_val_workers}
106
+ pin_memory: False
107
+ drop_last: False
108
+ collate_fn:
109
+ _target_: sam3.train.data.collator.collate_fn_api
110
+ _partial_: true
111
+ repeats: 1
112
+ dict_key: odinw35
113
+
114
+ model:
115
+ _target_: sam3.model_builder.build_sam3_image_model
116
+ bpe_path: ${paths.bpe_path}
117
+ device: cpus
118
+ eval_mode: true # Set to false if training
119
+ enable_segmentation: ${scratch.enable_segmentation} # Warning: Enable this if using segmentation.
120
+
121
+ meters:
122
+ val:
123
+ odinw35:
124
+ detection:
125
+ _target_: sam3.eval.coco_writer.PredictionDumper
126
+ iou_type: "bbox"
127
+ dump_dir: ${launcher.experiment_log_dir}/dumps/roboflow/${supercategory_tuple.name}
128
+ merge_predictions: True
129
+ postprocessor: ${scratch.original_box_postprocessor}
130
+ gather_pred_via_filesys: ${scratch.gather_pred_via_filesys}
131
+ maxdets: 100
132
+ pred_file_evaluators:
133
+ - _target_: sam3.eval.coco_eval_offline.CocoEvaluatorOfflineWithPredFileEvaluators
134
+ gt_path:
135
+ _target_: sam3.eval.coco_reindex.reindex_coco_to_temp
136
+ input_json_path: ${paths.odinw_data_root}/${supercategory_tuple.val.json}
137
+ tide: False
138
+ iou_type: "bbox"
139
+ positive_split: true
140
+
141
+ checkpoint:
142
+ save_dir: ${launcher.experiment_log_dir}/checkpoints
143
+ save_freq: 0 # 0 only last checkpoint is saved.
144
+
145
+
146
+ logging:
147
+ tensorboard_writer:
148
+ _target_: sam3.train.utils.logger.make_tensorboard_logger
149
+ log_dir: ${launcher.experiment_log_dir}/tensorboard
150
+ flush_secs: 120
151
+ should_log: True
152
+ wandb_writer: null
153
+ log_dir: ${launcher.experiment_log_dir}/logs/${supercategory_tuple.name}
154
+ log_freq: 10
155
+
156
+ # ============================================================================
157
+ # Launcher and Submitit Configuration
158
+ # ============================================================================
159
+
160
+ launcher:
161
+ num_nodes: 1
162
+ gpus_per_node: 2
163
+ experiment_log_dir: ${paths.experiment_log_dir}
164
+ multiprocessing_context: forkserver
165
+
166
+ submitit:
167
+ account: null
168
+ partition: null
169
+ qos: null
170
+ timeout_hour: 72
171
+ use_cluster: True
172
+ cpus_per_task: 10
173
+ port_range: [10000, 65000]
174
+ constraint: null
175
+
176
+ job_array:
177
+ num_tasks: 13
178
+ task_index: 0
179
+
180
+ # ============================================================================
181
+ # ODinW13 Supercategories
182
+ # ============================================================================
183
+
184
+ all_odinw_supercategories:
185
+ - name: AerialMaritimeDrone_large
186
+ val:
187
+ img_folder: AerialMaritimeDrone/large/test/
188
+ json: AerialMaritimeDrone/large/test/annotations_without_background.json
189
+ - name: Aquarium
190
+ val:
191
+ img_folder: Aquarium/Aquarium Combined.v2-raw-1024.coco/test/
192
+ json: Aquarium/Aquarium Combined.v2-raw-1024.coco/test/annotations_without_background.json
193
+ - name: CottontailRabbits
194
+ val:
195
+ img_folder: CottontailRabbits/test/
196
+ json: CottontailRabbits/test/annotations_without_background.json
197
+ - name: EgoHands_generic
198
+ val:
199
+ img_folder: EgoHands/generic/test/
200
+ json: EgoHands/generic/test/annotations_without_background.json
201
+ - name: NorthAmericaMushrooms
202
+ val:
203
+ img_folder: NorthAmericaMushrooms/North American Mushrooms.v1-416x416.coco/test/
204
+ json: NorthAmericaMushrooms/North American Mushrooms.v1-416x416.coco/test/annotations_without_background.json
205
+ - name: Packages
206
+ val:
207
+ img_folder: Packages/Raw/test/
208
+ json: Packages/Raw/test/annotations_without_background.json
209
+ - name: PascalVOC
210
+ val:
211
+ img_folder: PascalVOC/valid/
212
+ json: PascalVOC/valid/annotations_without_background.json
213
+ - name: Raccoon
214
+ val:
215
+ img_folder: Raccoon/Raccoon.v2-raw.coco/test/
216
+ json: Raccoon/Raccoon.v2-raw.coco/test/annotations_without_background.json
217
+ - name: ShellfishOpenImages
218
+ val:
219
+ img_folder: ShellfishOpenImages/raw/test/
220
+ json: ShellfishOpenImages/raw/test/annotations_without_background.json
221
+ - name: VehiclesOpenImages
222
+ val:
223
+ img_folder: VehiclesOpenImages/416x416/test/
224
+ json: VehiclesOpenImages/416x416/test/annotations_without_background.json
225
+ - name: pistols
226
+ val:
227
+ img_folder: pistols/export/
228
+ json: pistols/export/test_annotations_without_background.json
229
+ - name: pothole
230
+ val:
231
+ img_folder: pothole/test/
232
+ json: pothole/test/annotations_without_background.json
233
+ - name: thermalDogsAndPeople
234
+ val:
235
+ img_folder: thermalDogsAndPeople/test/
236
+ json: thermalDogsAndPeople/test/annotations_without_background.json
237
+
238
+
239
+ odinw35_prompts:
240
+ AerialMaritimeDrone_large: '[{"id": 1, "name": "boat", "supercategory": "movable-objects"},
241
+ {"id": 2, "name": "car", "supercategory": "movable-objects"}, {"id": 3, "name": "dock",
242
+ "supercategory": "movable-objects"}, {"id": 4, "name": "jet ski", "supercategory": "movable-objects"},
243
+ {"id": 5, "name": "boat lift", "supercategory": "movable-objects"}]'
244
+ Aquarium: null
245
+ CottontailRabbits: null
246
+ EgoHands_generic: null
247
+ NorthAmericaMushrooms: '[{''id'': 1, ''name'':
248
+ ''chicken of the woods'', ''supercategory'': ''mushroom''}, {''id'': 2, ''name'': ''chanterelle'', ''supercategory'': ''mushroom''}]'
249
+ Packages: null
250
+ PascalVOC: null
251
+ Raccoon: null
252
+ ShellfishOpenImages: null
253
+ VehiclesOpenImages: null
254
+ pistols: null
255
+ pothole: null
256
+ thermalDogsAndPeople: null
source_code/sam3/sam3/train/configs/roboflow_v100/roboflow_v100_full_ft_100_images.yaml ADDED
@@ -0,0 +1,539 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # @package _global_
2
+ defaults:
3
+ - _self_
4
+
5
+ # ============================================================================
6
+ # Paths Configuration (Change this to your own paths)
7
+ # ============================================================================
8
+ paths:
9
+ roboflow_vl_100_root: <YOUR_DATASET_DIR>
10
+ experiment_log_dir: <YOUR EXPERIMENT LOG_DIR>
11
+ bpe_path: <BPE_PATH> # This should be under assets/bpe_simple_vocab_16e6.txt.gz
12
+
13
+ # Roboflow dataset configuration
14
+ roboflow_train:
15
+ num_images: 100 # Note: This is the number of images used for training. If null, all images are used.
16
+ supercategory: ${all_roboflow_supercategories.${string:${submitit.job_array.task_index}}}
17
+
18
+ # Training transforms pipeline
19
+ train_transforms:
20
+ - _target_: sam3.train.transforms.basic_for_api.ComposeAPI
21
+ transforms:
22
+ - _target_: sam3.train.transforms.filter_query_transforms.FlexibleFilterFindGetQueries
23
+ query_filter:
24
+ _target_: sam3.train.transforms.filter_query_transforms.FilterCrowds
25
+ - _target_: sam3.train.transforms.point_sampling.RandomizeInputBbox
26
+ box_noise_std: 0.1
27
+ box_noise_max: 20
28
+ - _target_: sam3.train.transforms.segmentation.DecodeRle
29
+ - _target_: sam3.train.transforms.basic_for_api.RandomResizeAPI
30
+ sizes:
31
+ _target_: sam3.train.transforms.basic.get_random_resize_scales
32
+ size: ${scratch.resolution}
33
+ min_size: 480
34
+ rounded: false
35
+ max_size:
36
+ _target_: sam3.train.transforms.basic.get_random_resize_max_size
37
+ size: ${scratch.resolution}
38
+ square: true
39
+ consistent_transform: ${scratch.consistent_transform}
40
+ - _target_: sam3.train.transforms.basic_for_api.PadToSizeAPI
41
+ size: ${scratch.resolution}
42
+ consistent_transform: ${scratch.consistent_transform}
43
+ - _target_: sam3.train.transforms.basic_for_api.ToTensorAPI
44
+ - _target_: sam3.train.transforms.filter_query_transforms.FlexibleFilterFindGetQueries
45
+ query_filter:
46
+ _target_: sam3.train.transforms.filter_query_transforms.FilterEmptyTargets
47
+ - _target_: sam3.train.transforms.basic_for_api.NormalizeAPI
48
+ mean: ${scratch.train_norm_mean}
49
+ std: ${scratch.train_norm_std}
50
+ - _target_: sam3.train.transforms.filter_query_transforms.FlexibleFilterFindGetQueries
51
+ query_filter:
52
+ _target_: sam3.train.transforms.filter_query_transforms.FilterEmptyTargets
53
+ - _target_: sam3.train.transforms.filter_query_transforms.FlexibleFilterFindGetQueries
54
+ query_filter:
55
+ _target_: sam3.train.transforms.filter_query_transforms.FilterFindQueriesWithTooManyOut
56
+ max_num_objects: ${scratch.max_ann_per_img}
57
+
58
+ # Validation transforms pipeline
59
+ val_transforms:
60
+ - _target_: sam3.train.transforms.basic_for_api.ComposeAPI
61
+ transforms:
62
+ - _target_: sam3.train.transforms.basic_for_api.RandomResizeAPI
63
+ sizes: ${scratch.resolution}
64
+ max_size:
65
+ _target_: sam3.train.transforms.basic.get_random_resize_max_size
66
+ size: ${scratch.resolution}
67
+ square: true
68
+ consistent_transform: False
69
+ - _target_: sam3.train.transforms.basic_for_api.ToTensorAPI
70
+ - _target_: sam3.train.transforms.basic_for_api.NormalizeAPI
71
+ mean: ${scratch.train_norm_mean}
72
+ std: ${scratch.train_norm_std}
73
+
74
+ # loss config (no mask loss)
75
+ loss:
76
+ _target_: sam3.train.loss.sam3_loss.Sam3LossWrapper
77
+ matcher: ${scratch.matcher}
78
+ o2m_weight: 2.0
79
+ o2m_matcher:
80
+ _target_: sam3.train.matcher.BinaryOneToManyMatcher
81
+ alpha: 0.3
82
+ threshold: 0.4
83
+ topk: 4
84
+ use_o2m_matcher_on_o2m_aux: false # Another option is true
85
+ loss_fns_find:
86
+ - _target_: sam3.train.loss.loss_fns.Boxes
87
+ weight_dict:
88
+ loss_bbox: 5.0
89
+ loss_giou: 2.0
90
+ - _target_: sam3.train.loss.loss_fns.IABCEMdetr
91
+ weak_loss: False
92
+ weight_dict:
93
+ loss_ce: 20.0 # Another option is 100.0
94
+ presence_loss: 20.0
95
+ pos_weight: 10.0 # Another option is 5.0
96
+ alpha: 0.25
97
+ gamma: 2
98
+ use_presence: True # Change
99
+ pos_focal: false
100
+ pad_n_queries: 200
101
+ pad_scale_pos: 1.0
102
+
103
+ loss_fn_semantic_seg: null
104
+ scale_by_find_batch_size: ${scratch.scale_by_find_batch_size}
105
+
106
+
107
+ # NOTE: Loss to be used for training in case of segmentation
108
+ # loss:
109
+ # _target_: sam3.train.loss.sam3_loss.Sam3LossWrapper
110
+ # matcher: ${scratch.matcher}
111
+ # o2m_weight: 2.0
112
+ # o2m_matcher:
113
+ # _target_: sam3.train.matcher.BinaryOneToManyMatcher
114
+ # alpha: 0.3
115
+ # threshold: 0.4
116
+ # topk: 4
117
+ # use_o2m_matcher_on_o2m_aux: false
118
+ # loss_fns_find:
119
+ # - _target_: sam3.train.loss.loss_fns.Boxes
120
+ # weight_dict:
121
+ # loss_bbox: 5.0
122
+ # loss_giou: 2.0
123
+ # - _target_: sam3.train.loss.loss_fns.IABCEMdetr
124
+ # weak_loss: False
125
+ # weight_dict:
126
+ # loss_ce: 20.0 # Another option is 100.0
127
+ # presence_loss: 20.0
128
+ # pos_weight: 10.0 # Another option is 5.0
129
+ # alpha: 0.25
130
+ # gamma: 2
131
+ # use_presence: True # Change
132
+ # pos_focal: false
133
+ # pad_n_queries: 200
134
+ # pad_scale_pos: 1.0
135
+ # - _target_: sam3.train.loss.loss_fns.Masks
136
+ # focal_alpha: 0.25
137
+ # focal_gamma: 2.0
138
+ # weight_dict:
139
+ # loss_mask: 200.0
140
+ # loss_dice: 10.0
141
+ # compute_aux: false
142
+ # loss_fn_semantic_seg:
143
+ # _target_: sam3.losses.loss_fns.SemanticSegCriterion
144
+ # presence_head: True
145
+ # presence_loss: False # Change
146
+ # focal: True
147
+ # focal_alpha: 0.6
148
+ # focal_gamma: 2.0
149
+ # downsample: False
150
+ # weight_dict:
151
+ # loss_semantic_seg: 20.0
152
+ # loss_semantic_presence: 1.0
153
+ # loss_semantic_dice: 30.0
154
+ # scale_by_find_batch_size: ${scratch.scale_by_find_batch_size}
155
+
156
+ # ============================================================================
157
+ # Different helper parameters and functions
158
+ # ============================================================================
159
+ scratch:
160
+ enable_segmentation: False # NOTE: This is the number of queries used for segmentation
161
+ # Model parameters
162
+ d_model: 256
163
+ pos_embed:
164
+ _target_: sam3.model.position_encoding.PositionEmbeddingSine
165
+ num_pos_feats: ${scratch.d_model}
166
+ normalize: true
167
+ scale: null
168
+ temperature: 10000
169
+
170
+ # Box processing
171
+ use_presence_eval: True
172
+ original_box_postprocessor:
173
+ _target_: sam3.eval.postprocessors.PostProcessImage
174
+ max_dets_per_img: -1 # infinite detections
175
+ use_original_ids: true
176
+ use_original_sizes_box: true
177
+ use_presence: ${scratch.use_presence_eval}
178
+
179
+ # Matcher configuration
180
+ matcher:
181
+ _target_: sam3.train.matcher.BinaryHungarianMatcherV2
182
+ focal: true # with `focal: true` it is equivalent to BinaryFocalHungarianMatcher
183
+ cost_class: 2.0
184
+ cost_bbox: 5.0
185
+ cost_giou: 2.0
186
+ alpha: 0.25
187
+ gamma: 2
188
+ stable: False
189
+ scale_by_find_batch_size: True
190
+
191
+ # Image processing parameters
192
+ resolution: 1008
193
+ consistent_transform: False
194
+ max_ann_per_img: 200
195
+
196
+ # Normalization parameters
197
+ train_norm_mean: [0.5, 0.5, 0.5]
198
+ train_norm_std: [0.5, 0.5, 0.5]
199
+ val_norm_mean: [0.5, 0.5, 0.5]
200
+ val_norm_std: [0.5, 0.5, 0.5]
201
+
202
+ # Training parameters
203
+ num_train_workers: 10
204
+ num_val_workers: 0
205
+ max_data_epochs: 20
206
+ target_epoch_size: 1500
207
+ hybrid_repeats: 1
208
+ context_length: 2
209
+ gather_pred_via_filesys: false
210
+
211
+ # Learning rate and scheduler parameters
212
+ lr_scale: 0.1
213
+ lr_transformer: ${times:8e-4,${scratch.lr_scale}}
214
+ lr_vision_backbone: ${times:2.5e-4,${scratch.lr_scale}}
215
+ lr_language_backbone: ${times:5e-5,${scratch.lr_scale}}
216
+ lrd_vision_backbone: 0.9
217
+ wd: 0.1
218
+ scheduler_timescale: 20
219
+ scheduler_warmup: 20
220
+ scheduler_cooldown: 20
221
+
222
+ val_batch_size: 1
223
+ collate_fn_val:
224
+ _target_: sam3.train.data.collator.collate_fn_api
225
+ _partial_: true
226
+ repeats: ${scratch.hybrid_repeats}
227
+ dict_key: roboflow100
228
+ with_seg_masks: ${scratch.enable_segmentation} # Note: Set this to true if using segmentation masks!
229
+
230
+ gradient_accumulation_steps: 1
231
+ train_batch_size: 1
232
+ collate_fn:
233
+ _target_: sam3.train.data.collator.collate_fn_api
234
+ _partial_: true
235
+ repeats: ${scratch.hybrid_repeats}
236
+ dict_key: all
237
+ with_seg_masks: ${scratch.enable_segmentation} # Note: Set this to true if using segmentation masks!
238
+
239
+ # ============================================================================
240
+ # Trainer Configuration
241
+ # ============================================================================
242
+
243
+ trainer:
244
+
245
+ _target_: sam3.train.trainer.Trainer
246
+ skip_saving_ckpts: true
247
+ empty_gpu_mem_cache_after_eval: True
248
+ skip_first_val: True
249
+ max_epochs: 20
250
+ accelerator: cuda
251
+ seed_value: 123
252
+ val_epoch_freq: 10
253
+ mode: train
254
+ gradient_accumulation_steps: ${scratch.gradient_accumulation_steps}
255
+
256
+ distributed:
257
+ backend: nccl
258
+ find_unused_parameters: True
259
+ gradient_as_bucket_view: True
260
+
261
+ loss:
262
+ all: ${roboflow_train.loss}
263
+ default:
264
+ _target_: sam3.train.loss.sam3_loss.DummyLoss
265
+
266
+ data:
267
+ train:
268
+ _target_: sam3.train.data.torch_dataset.TorchDataset
269
+ dataset:
270
+ _target_: sam3.train.data.sam3_image_dataset.Sam3ImageDataset
271
+ limit_ids: ${roboflow_train.num_images}
272
+ transforms: ${roboflow_train.train_transforms}
273
+ load_segmentation: ${scratch.enable_segmentation}
274
+ max_ann_per_img: 500000
275
+ multiplier: 1
276
+ max_train_queries: 50000
277
+ max_val_queries: 50000
278
+ training: true
279
+ use_caching: False
280
+ img_folder: ${paths.roboflow_vl_100_root}/${roboflow_train.supercategory}/train/
281
+ ann_file: ${paths.roboflow_vl_100_root}/${roboflow_train.supercategory}/train/_annotations.coco.json
282
+
283
+ shuffle: True
284
+ batch_size: ${scratch.train_batch_size}
285
+ num_workers: ${scratch.num_train_workers}
286
+ pin_memory: True
287
+ drop_last: True
288
+ collate_fn: ${scratch.collate_fn}
289
+
290
+ val:
291
+ _target_: sam3.train.data.torch_dataset.TorchDataset
292
+ dataset:
293
+ _target_: sam3.train.data.sam3_image_dataset.Sam3ImageDataset
294
+ load_segmentation: ${scratch.enable_segmentation}
295
+ coco_json_loader:
296
+ _target_: sam3.train.data.coco_json_loaders.COCO_FROM_JSON
297
+ include_negatives: true
298
+ category_chunk_size: 2 # Note: You can increase this based on the memory of your GPU.
299
+ _partial_: true
300
+ img_folder: ${paths.roboflow_vl_100_root}/${roboflow_train.supercategory}/test/
301
+ ann_file: ${paths.roboflow_vl_100_root}/${roboflow_train.supercategory}/test/_annotations.coco.json
302
+ transforms: ${roboflow_train.val_transforms}
303
+ max_ann_per_img: 100000
304
+ multiplier: 1
305
+ training: false
306
+
307
+ shuffle: False
308
+ batch_size: ${scratch.val_batch_size}
309
+ num_workers: ${scratch.num_val_workers}
310
+ pin_memory: True
311
+ drop_last: False
312
+ collate_fn: ${scratch.collate_fn_val}
313
+
314
+
315
+ model:
316
+ _target_: sam3.model_builder.build_sam3_image_model
317
+ bpe_path: ${paths.bpe_path}
318
+ device: cpus
319
+ eval_mode: false
320
+ enable_segmentation: ${scratch.enable_segmentation} # Warning: Enable this if using segmentation.
321
+
322
+ meters:
323
+ val:
324
+ roboflow100:
325
+ detection:
326
+ _target_: sam3.eval.coco_writer.PredictionDumper
327
+ iou_type: "bbox"
328
+ dump_dir: ${launcher.experiment_log_dir}/dumps/roboflow/${roboflow_train.supercategory}
329
+ merge_predictions: True
330
+ postprocessor: ${scratch.original_box_postprocessor}
331
+ gather_pred_via_filesys: ${scratch.gather_pred_via_filesys}
332
+ maxdets: 100
333
+ pred_file_evaluators:
334
+ - _target_: sam3.eval.coco_eval_offline.CocoEvaluatorOfflineWithPredFileEvaluators
335
+ gt_path: ${paths.roboflow_vl_100_root}/${roboflow_train.supercategory}/test/_annotations.coco.json
336
+ tide: False
337
+ iou_type: "bbox"
338
+
339
+ optim:
340
+ amp:
341
+ enabled: True
342
+ amp_dtype: bfloat16
343
+
344
+ optimizer:
345
+ _target_: torch.optim.AdamW
346
+
347
+ gradient_clip:
348
+ _target_: sam3.train.optim.optimizer.GradientClipper
349
+ max_norm: 0.1
350
+ norm_type: 2
351
+
352
+ param_group_modifiers:
353
+ - _target_: sam3.train.optim.optimizer.layer_decay_param_modifier
354
+ _partial_: True
355
+ layer_decay_value: ${scratch.lrd_vision_backbone}
356
+ apply_to: 'backbone.vision_backbone.trunk'
357
+ overrides:
358
+ - pattern: '*pos_embed*'
359
+ value: 1.0
360
+
361
+ options:
362
+ lr:
363
+ - scheduler: # transformer and class_embed
364
+ _target_: sam3.train.optim.schedulers.InverseSquareRootParamScheduler
365
+ base_lr: ${scratch.lr_transformer}
366
+ timescale: ${scratch.scheduler_timescale}
367
+ warmup_steps: ${scratch.scheduler_warmup}
368
+ cooldown_steps: ${scratch.scheduler_cooldown}
369
+ - scheduler:
370
+ _target_: sam3.train.optim.schedulers.InverseSquareRootParamScheduler
371
+ base_lr: ${scratch.lr_vision_backbone}
372
+ timescale: ${scratch.scheduler_timescale}
373
+ warmup_steps: ${scratch.scheduler_warmup}
374
+ cooldown_steps: ${scratch.scheduler_cooldown}
375
+ param_names:
376
+ - 'backbone.vision_backbone.*'
377
+ - scheduler:
378
+ _target_: sam3.train.optim.schedulers.InverseSquareRootParamScheduler
379
+ base_lr: ${scratch.lr_language_backbone}
380
+ timescale: ${scratch.scheduler_timescale}
381
+ warmup_steps: ${scratch.scheduler_warmup}
382
+ cooldown_steps: ${scratch.scheduler_cooldown}
383
+ param_names:
384
+ - 'backbone.language_backbone.*'
385
+
386
+ weight_decay:
387
+ - scheduler:
388
+ _target_: fvcore.common.param_scheduler.ConstantParamScheduler
389
+ value: ${scratch.wd}
390
+ - scheduler:
391
+ _target_: fvcore.common.param_scheduler.ConstantParamScheduler
392
+ value: 0.0
393
+ param_names:
394
+ - '*bias*'
395
+ module_cls_names: ['torch.nn.LayerNorm']
396
+
397
+ checkpoint:
398
+ save_dir: ${launcher.experiment_log_dir}/checkpoints
399
+ save_freq: 0 # 0 only last checkpoint is saved.
400
+
401
+ logging:
402
+ tensorboard_writer:
403
+ _target_: sam3.train.utils.logger.make_tensorboard_logger
404
+ log_dir: ${launcher.experiment_log_dir}/tensorboard
405
+ flush_secs: 120
406
+ should_log: True
407
+ wandb_writer: null
408
+ log_dir: ${launcher.experiment_log_dir}/logs/${roboflow_train.supercategory}
409
+ log_freq: 10
410
+
411
+ # ============================================================================
412
+ # Launcher and Submitit Configuration
413
+ # ============================================================================
414
+
415
+ launcher:
416
+ num_nodes: 1
417
+ gpus_per_node: 2
418
+ experiment_log_dir: ${paths.experiment_log_dir}
419
+ multiprocessing_context: forkserver
420
+
421
+ submitit:
422
+ account: null
423
+ partition: null
424
+ qos: null
425
+ timeout_hour: 72
426
+ use_cluster: True
427
+ cpus_per_task: 10
428
+ port_range: [10000, 65000]
429
+ constraint: null
430
+ # Uncomment for job array configuration
431
+ job_array:
432
+ num_tasks: 100
433
+ task_index: 0
434
+
435
+ # ============================================================================
436
+ # Available Roboflow Supercategories (for reference)
437
+ # ============================================================================
438
+
439
+ all_roboflow_supercategories:
440
+ - -grccs
441
+ - zebrasatasturias
442
+ - cod-mw-warzone
443
+ - canalstenosis
444
+ - label-printing-defect-version-2
445
+ - new-defects-in-wood
446
+ - orionproducts
447
+ - aquarium-combined
448
+ - varroa-mites-detection--test-set
449
+ - clashroyalechardetector
450
+ - stomata-cells
451
+ - halo-infinite-angel-videogame
452
+ - pig-detection
453
+ - urine-analysis1
454
+ - aerial-sheep
455
+ - orgharvest
456
+ - actions
457
+ - mahjong
458
+ - liver-disease
459
+ - needle-base-tip-min-max
460
+ - wheel-defect-detection
461
+ - aircraft-turnaround-dataset
462
+ - xray
463
+ - wildfire-smoke
464
+ - spinefrxnormalvindr
465
+ - ufba-425
466
+ - speech-bubbles-detection
467
+ - train
468
+ - pill
469
+ - truck-movement
470
+ - car-logo-detection
471
+ - inbreast
472
+ - sea-cucumbers-new-tiles
473
+ - uavdet-small
474
+ - penguin-finder-seg
475
+ - aerial-airport
476
+ - bibdetection
477
+ - taco-trash-annotations-in-context
478
+ - bees
479
+ - recode-waste
480
+ - screwdetectclassification
481
+ - wine-labels
482
+ - aerial-cows
483
+ - into-the-vale
484
+ - gwhd2021
485
+ - lacrosse-object-detection
486
+ - defect-detection
487
+ - dataconvert
488
+ - x-ray-id
489
+ - ball
490
+ - tube
491
+ - 2024-frc
492
+ - crystal-clean-brain-tumors-mri-dataset
493
+ - grapes-5
494
+ - human-detection-in-floods
495
+ - buoy-onboarding
496
+ - apoce-aerial-photographs-for-object-detection-of-construction-equipment
497
+ - l10ul502
498
+ - floating-waste
499
+ - deeppcb
500
+ - ism-band-packet-detection
501
+ - weeds4
502
+ - invoice-processing
503
+ - thermal-cheetah
504
+ - tomatoes-2
505
+ - marine-sharks
506
+ - peixos-fish
507
+ - sssod
508
+ - aerial-pool
509
+ - countingpills
510
+ - asphaltdistressdetection
511
+ - roboflow-trained-dataset
512
+ - everdaynew
513
+ - underwater-objects
514
+ - soda-bottles
515
+ - dentalai
516
+ - jellyfish
517
+ - deepfruits
518
+ - activity-diagrams
519
+ - circuit-voltages
520
+ - all-elements
521
+ - macro-segmentation
522
+ - exploratorium-daphnia
523
+ - signatures
524
+ - conveyor-t-shirts
525
+ - fruitjes
526
+ - grass-weeds
527
+ - infraredimageofpowerequipment
528
+ - 13-lkc01
529
+ - wb-prova
530
+ - flir-camera-objects
531
+ - paper-parts
532
+ - football-player-detection
533
+ - trail-camera
534
+ - smd-components
535
+ - water-meter
536
+ - nih-xray
537
+ - the-dreidel-project
538
+ - electric-pylon-detection-in-rsi
539
+ - cable-damage