diff --git a/FateZero-main/data/shape/man_skate/00001.png b/FateZero-main/data/shape/man_skate/00001.png
new file mode 100644
index 0000000000000000000000000000000000000000..777b53ada3c9b6c202409d254c93cb0fad009061
--- /dev/null
+++ b/FateZero-main/data/shape/man_skate/00001.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:205659a37d24a46e736122db9a397134a595a71bad22b25ad42c3ea462371657
+size 404536
diff --git a/FateZero-main/data/shape/man_skate/00004.png b/FateZero-main/data/shape/man_skate/00004.png
new file mode 100644
index 0000000000000000000000000000000000000000..9394e45e89a8443bc2140faba823fdbb91d7a4c4
--- /dev/null
+++ b/FateZero-main/data/shape/man_skate/00004.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3991f5aac5541b1f3bee4d8fc71afeb631abaf72a5bcaf43cf5b1f24ecc58f37
+size 405545
diff --git a/FateZero-main/data/shape/swan_swarov/00007.png b/FateZero-main/data/shape/swan_swarov/00007.png
new file mode 100644
index 0000000000000000000000000000000000000000..a24d04eeb28a457824a9b79c82e7d9c5bf4e525e
--- /dev/null
+++ b/FateZero-main/data/shape/swan_swarov/00007.png
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3cff4bc82f6ac12b75b7dfdcf7cd2b5756a00d00de4cf91f49730e050a68d4e6
+size 442943
diff --git a/RAVE-main/annotator/keypose/__init__.py b/RAVE-main/annotator/keypose/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..aa3dfa2e1589f22471411b3180ccaf870f147d73
--- /dev/null
+++ b/RAVE-main/annotator/keypose/__init__.py
@@ -0,0 +1,212 @@
+import numpy as np
+import cv2
+import torch
+
+import os
+from modules import devices
+from annotator.annotator_path import models_path
+
+import mmcv
+from mmdet.apis import inference_detector, init_detector
+from mmpose.apis import inference_top_down_pose_model
+from mmpose.apis import init_pose_model, process_mmdet_results, vis_pose_result
+
+
+def preprocessing(image, device):
+ # Resize
+ scale = 640 / max(image.shape[:2])
+ image = cv2.resize(image, dsize=None, fx=scale, fy=scale)
+ raw_image = image.astype(np.uint8)
+
+ # Subtract mean values
+ image = image.astype(np.float32)
+ image -= np.array(
+ [
+ float(104.008),
+ float(116.669),
+ float(122.675),
+ ]
+ )
+
+ # Convert to torch.Tensor and add "batch" axis
+ image = torch.from_numpy(image.transpose(2, 0, 1)).float().unsqueeze(0)
+ image = image.to(device)
+
+ return image, raw_image
+
+
+def imshow_keypoints(img,
+ pose_result,
+ skeleton=None,
+ kpt_score_thr=0.1,
+ pose_kpt_color=None,
+ pose_link_color=None,
+ radius=4,
+ thickness=1):
+ """Draw keypoints and links on an image.
+ Args:
+ img (ndarry): The image to draw poses on.
+ pose_result (list[kpts]): The poses to draw. Each element kpts is
+ a set of K keypoints as an Kx3 numpy.ndarray, where each
+ keypoint is represented as x, y, score.
+ kpt_score_thr (float, optional): Minimum score of keypoints
+            to be shown. Default: 0.1.
+ pose_kpt_color (np.array[Nx3]`): Color of N keypoints. If None,
+ the keypoint will not be drawn.
+ pose_link_color (np.array[Mx3]): Color of M links. If None, the
+ links will not be drawn.
+ thickness (int): Thickness of lines.
+ """
+
+ img_h, img_w, _ = img.shape
+ img = np.zeros(img.shape)
+
+ for idx, kpts in enumerate(pose_result):
+ if idx > 1:
+ continue
+ kpts = kpts['keypoints']
+ # print(kpts)
+ kpts = np.array(kpts, copy=False)
+
+ # draw each point on image
+ if pose_kpt_color is not None:
+ assert len(pose_kpt_color) == len(kpts)
+
+ for kid, kpt in enumerate(kpts):
+ x_coord, y_coord, kpt_score = int(kpt[0]), int(kpt[1]), kpt[2]
+
+ if kpt_score < kpt_score_thr or pose_kpt_color[kid] is None:
+ # skip the point that should not be drawn
+ continue
+
+ color = tuple(int(c) for c in pose_kpt_color[kid])
+ cv2.circle(img, (int(x_coord), int(y_coord)),
+ radius, color, -1)
+
+ # draw links
+ if skeleton is not None and pose_link_color is not None:
+ assert len(pose_link_color) == len(skeleton)
+
+ for sk_id, sk in enumerate(skeleton):
+ pos1 = (int(kpts[sk[0], 0]), int(kpts[sk[0], 1]))
+ pos2 = (int(kpts[sk[1], 0]), int(kpts[sk[1], 1]))
+
+ if (pos1[0] <= 0 or pos1[0] >= img_w or pos1[1] <= 0 or pos1[1] >= img_h or pos2[0] <= 0
+ or pos2[0] >= img_w or pos2[1] <= 0 or pos2[1] >= img_h or kpts[sk[0], 2] < kpt_score_thr
+ or kpts[sk[1], 2] < kpt_score_thr or pose_link_color[sk_id] is None):
+ # skip the link that should not be drawn
+ continue
+ color = tuple(int(c) for c in pose_link_color[sk_id])
+ cv2.line(img, pos1, pos2, color, thickness=thickness)
+
+ return img
+
+
+human_det, pose_model = None, None
+det_model_path = "https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth"
+pose_model_path = "https://download.openmmlab.com/mmpose/top_down/hrnet/hrnet_w48_coco_256x192-b9e0b3ab_20200708.pth"
+
+modeldir = os.path.join(models_path, "keypose")
+old_modeldir = os.path.dirname(os.path.realpath(__file__))
+
+det_config = 'faster_rcnn_r50_fpn_coco.py'
+pose_config = 'hrnet_w48_coco_256x192.py'
+
+det_checkpoint = 'faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth'
+pose_checkpoint = 'hrnet_w48_coco_256x192-b9e0b3ab_20200708.pth'
+det_cat_id = 1
+bbox_thr = 0.2
+
+skeleton = [
+ [15, 13], [13, 11], [16, 14], [14, 12], [11, 12], [5, 11], [6, 12], [5, 6], [5, 7], [6, 8],
+ [7, 9], [8, 10],
+ [1, 2], [0, 1], [0, 2], [1, 3], [2, 4], [3, 5], [4, 6]
+]
+
+pose_kpt_color = [
+ [51, 153, 255], [51, 153, 255], [51, 153, 255], [51, 153, 255], [51, 153, 255],
+ [0, 255, 0],
+ [255, 128, 0], [0, 255, 0], [255, 128, 0], [0, 255, 0], [255, 128, 0], [0, 255, 0],
+ [255, 128, 0],
+ [0, 255, 0], [255, 128, 0], [0, 255, 0], [255, 128, 0]
+]
+
+pose_link_color = [
+ [0, 255, 0], [0, 255, 0], [255, 128, 0], [255, 128, 0],
+ [51, 153, 255], [51, 153, 255], [51, 153, 255], [51, 153, 255], [0, 255, 0],
+ [255, 128, 0],
+ [0, 255, 0], [255, 128, 0], [51, 153, 255], [51, 153, 255], [51, 153, 255],
+ [51, 153, 255],
+ [51, 153, 255], [51, 153, 255], [51, 153, 255]
+]
+
+def find_download_model(checkpoint, remote_path):
+ modelpath = os.path.join(modeldir, checkpoint)
+ old_modelpath = os.path.join(old_modeldir, checkpoint)
+
+ if os.path.exists(old_modelpath):
+ modelpath = old_modelpath
+ elif not os.path.exists(modelpath):
+ from basicsr.utils.download_util import load_file_from_url
+ load_file_from_url(remote_path, model_dir=modeldir)
+
+ return modelpath
+
+def apply_keypose(input_image):
+    global human_det, pose_model
+    if human_det is None or pose_model is None:
+        det_model_local = find_download_model(det_checkpoint, det_model_path)
+        hrnet_model_local = find_download_model(pose_checkpoint, pose_model_path)
+        det_config_mmcv = mmcv.Config.fromfile(det_config)
+        pose_config_mmcv = mmcv.Config.fromfile(pose_config)
+        human_det = init_detector(det_config_mmcv, det_model_local, device=devices.get_device_for("controlnet"))
+        pose_model = init_pose_model(pose_config_mmcv, hrnet_model_local, device=devices.get_device_for("controlnet"))
+
+    assert input_image.ndim == 3
+    input_image = input_image.copy()
+    with torch.no_grad():
+        image = torch.from_numpy(input_image).float().to(devices.get_device_for("controlnet"))
+        image = image / 255.0
+        mmdet_results = inference_detector(human_det, image)
+
+        # keep the person class bounding boxes.
+        person_results = process_mmdet_results(mmdet_results, det_cat_id)
+
+        return_heatmap = False
+        dataset = pose_model.cfg.data['test']['type']
+
+        # e.g. use ('backbone', ) to return backbone feature
+        output_layer_names = None
+        pose_results, _ = inference_top_down_pose_model(
+            pose_model,
+            image,
+            person_results,
+            bbox_thr=bbox_thr,
+            format='xyxy',
+            dataset=dataset,
+            dataset_info=None,
+            return_heatmap=return_heatmap,
+            outputs=output_layer_names
+        )
+
+        im_keypose_out = imshow_keypoints(
+            image,
+            pose_results,
+            skeleton=skeleton,
+            pose_kpt_color=pose_kpt_color,
+            pose_link_color=pose_link_color,
+            radius=2,
+            thickness=2
+        )
+        im_keypose_out = im_keypose_out.astype(np.uint8)
+
+    # Residual HED-annotator template code, intentionally disabled:
+    # image_hed = rearrange(image_hed, 'h w c -> 1 c h w')
+    # edge = (edge.cpu().numpy() * 255.0).clip(0, 255).astype(np.uint8)
+    return im_keypose_out
+
+
+def unload_hed_model():
+    for model in (human_det, pose_model):
+        if model is not None:
+            model.cpu()
diff --git a/RAVE-main/annotator/keypose/faster_rcnn_r50_fpn_coco.py b/RAVE-main/annotator/keypose/faster_rcnn_r50_fpn_coco.py
new file mode 100644
index 0000000000000000000000000000000000000000..a9ad9528b22163ae7ce1390375b69227fd6eafd9
--- /dev/null
+++ b/RAVE-main/annotator/keypose/faster_rcnn_r50_fpn_coco.py
@@ -0,0 +1,182 @@
+checkpoint_config = dict(interval=1)
+# yapf:disable
+log_config = dict(
+ interval=50,
+ hooks=[
+ dict(type='TextLoggerHook'),
+ # dict(type='TensorboardLoggerHook')
+ ])
+# yapf:enable
+dist_params = dict(backend='nccl')
+log_level = 'INFO'
+load_from = None
+resume_from = None
+workflow = [('train', 1)]
+# optimizer
+optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
+optimizer_config = dict(grad_clip=None)
+# learning policy
+lr_config = dict(
+ policy='step',
+ warmup='linear',
+ warmup_iters=500,
+ warmup_ratio=0.001,
+ step=[8, 11])
+total_epochs = 12
+
+model = dict(
+ type='FasterRCNN',
+ pretrained='torchvision://resnet50',
+ backbone=dict(
+ type='ResNet',
+ depth=50,
+ num_stages=4,
+ out_indices=(0, 1, 2, 3),
+ frozen_stages=1,
+ norm_cfg=dict(type='BN', requires_grad=True),
+ norm_eval=True,
+ style='pytorch'),
+ neck=dict(
+ type='FPN',
+ in_channels=[256, 512, 1024, 2048],
+ out_channels=256,
+ num_outs=5),
+ rpn_head=dict(
+ type='RPNHead',
+ in_channels=256,
+ feat_channels=256,
+ anchor_generator=dict(
+ type='AnchorGenerator',
+ scales=[8],
+ ratios=[0.5, 1.0, 2.0],
+ strides=[4, 8, 16, 32, 64]),
+ bbox_coder=dict(
+ type='DeltaXYWHBBoxCoder',
+ target_means=[.0, .0, .0, .0],
+ target_stds=[1.0, 1.0, 1.0, 1.0]),
+ loss_cls=dict(
+ type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
+ loss_bbox=dict(type='L1Loss', loss_weight=1.0)),
+ roi_head=dict(
+ type='StandardRoIHead',
+ bbox_roi_extractor=dict(
+ type='SingleRoIExtractor',
+ roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
+ out_channels=256,
+ featmap_strides=[4, 8, 16, 32]),
+ bbox_head=dict(
+ type='Shared2FCBBoxHead',
+ in_channels=256,
+ fc_out_channels=1024,
+ roi_feat_size=7,
+ num_classes=80,
+ bbox_coder=dict(
+ type='DeltaXYWHBBoxCoder',
+ target_means=[0., 0., 0., 0.],
+ target_stds=[0.1, 0.1, 0.2, 0.2]),
+ reg_class_agnostic=False,
+ loss_cls=dict(
+ type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
+ loss_bbox=dict(type='L1Loss', loss_weight=1.0))),
+ # model training and testing settings
+ train_cfg=dict(
+ rpn=dict(
+ assigner=dict(
+ type='MaxIoUAssigner',
+ pos_iou_thr=0.7,
+ neg_iou_thr=0.3,
+ min_pos_iou=0.3,
+ match_low_quality=True,
+ ignore_iof_thr=-1),
+ sampler=dict(
+ type='RandomSampler',
+ num=256,
+ pos_fraction=0.5,
+ neg_pos_ub=-1,
+ add_gt_as_proposals=False),
+ allowed_border=-1,
+ pos_weight=-1,
+ debug=False),
+ rpn_proposal=dict(
+ nms_pre=2000,
+ max_per_img=1000,
+ nms=dict(type='nms', iou_threshold=0.7),
+ min_bbox_size=0),
+ rcnn=dict(
+ assigner=dict(
+ type='MaxIoUAssigner',
+ pos_iou_thr=0.5,
+ neg_iou_thr=0.5,
+ min_pos_iou=0.5,
+ match_low_quality=False,
+ ignore_iof_thr=-1),
+ sampler=dict(
+ type='RandomSampler',
+ num=512,
+ pos_fraction=0.25,
+ neg_pos_ub=-1,
+ add_gt_as_proposals=True),
+ pos_weight=-1,
+ debug=False)),
+ test_cfg=dict(
+ rpn=dict(
+ nms_pre=1000,
+ max_per_img=1000,
+ nms=dict(type='nms', iou_threshold=0.7),
+ min_bbox_size=0),
+ rcnn=dict(
+ score_thr=0.05,
+ nms=dict(type='nms', iou_threshold=0.5),
+ max_per_img=100)
+ # soft-nms is also supported for rcnn testing
+ # e.g., nms=dict(type='soft_nms', iou_threshold=0.5, min_score=0.05)
+ ))
+
+dataset_type = 'CocoDataset'
+data_root = 'data/coco'
+img_norm_cfg = dict(
+ mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='LoadAnnotations', with_bbox=True),
+ dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
+ dict(type='RandomFlip', flip_ratio=0.5),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='Pad', size_divisor=32),
+ dict(type='DefaultFormatBundle'),
+ dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
+]
+test_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(
+ type='MultiScaleFlipAug',
+ img_scale=(1333, 800),
+ flip=False,
+ transforms=[
+ dict(type='Resize', keep_ratio=True),
+ dict(type='RandomFlip'),
+ dict(type='Normalize', **img_norm_cfg),
+ dict(type='Pad', size_divisor=32),
+ dict(type='DefaultFormatBundle'),
+ dict(type='Collect', keys=['img']),
+ ])
+]
+data = dict(
+ samples_per_gpu=2,
+ workers_per_gpu=2,
+ train=dict(
+ type=dataset_type,
+ ann_file=f'{data_root}/annotations/instances_train2017.json',
+ img_prefix=f'{data_root}/train2017/',
+ pipeline=train_pipeline),
+ val=dict(
+ type=dataset_type,
+ ann_file=f'{data_root}/annotations/instances_val2017.json',
+ img_prefix=f'{data_root}/val2017/',
+ pipeline=test_pipeline),
+ test=dict(
+ type=dataset_type,
+ ann_file=f'{data_root}/annotations/instances_val2017.json',
+ img_prefix=f'{data_root}/val2017/',
+ pipeline=test_pipeline))
+evaluation = dict(interval=1, metric='bbox')
diff --git a/RAVE-main/annotator/keypose/hrnet_w48_coco_256x192.py b/RAVE-main/annotator/keypose/hrnet_w48_coco_256x192.py
new file mode 100644
index 0000000000000000000000000000000000000000..9755e6773cd3a8c0d2ac684c612d716cfd44b0ca
--- /dev/null
+++ b/RAVE-main/annotator/keypose/hrnet_w48_coco_256x192.py
@@ -0,0 +1,169 @@
+# _base_ = [
+# '../../../../_base_/default_runtime.py',
+# '../../../../_base_/datasets/coco.py'
+# ]
+evaluation = dict(interval=10, metric='mAP', save_best='AP')
+
+optimizer = dict(
+ type='Adam',
+ lr=5e-4,
+)
+optimizer_config = dict(grad_clip=None)
+# learning policy
+lr_config = dict(
+ policy='step',
+ warmup='linear',
+ warmup_iters=500,
+ warmup_ratio=0.001,
+ step=[170, 200])
+total_epochs = 210
+channel_cfg = dict(
+ num_output_channels=17,
+ dataset_joints=17,
+ dataset_channel=[
+ [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
+ ],
+ inference_channel=[
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
+ ])
+
+# model settings
+model = dict(
+ type='TopDown',
+ pretrained='https://download.openmmlab.com/mmpose/'
+ 'pretrain_models/hrnet_w48-8ef0771d.pth',
+ backbone=dict(
+ type='HRNet',
+ in_channels=3,
+ extra=dict(
+ stage1=dict(
+ num_modules=1,
+ num_branches=1,
+ block='BOTTLENECK',
+ num_blocks=(4, ),
+ num_channels=(64, )),
+ stage2=dict(
+ num_modules=1,
+ num_branches=2,
+ block='BASIC',
+ num_blocks=(4, 4),
+ num_channels=(48, 96)),
+ stage3=dict(
+ num_modules=4,
+ num_branches=3,
+ block='BASIC',
+ num_blocks=(4, 4, 4),
+ num_channels=(48, 96, 192)),
+ stage4=dict(
+ num_modules=3,
+ num_branches=4,
+ block='BASIC',
+ num_blocks=(4, 4, 4, 4),
+ num_channels=(48, 96, 192, 384))),
+ ),
+ keypoint_head=dict(
+ type='TopdownHeatmapSimpleHead',
+ in_channels=48,
+ out_channels=channel_cfg['num_output_channels'],
+ num_deconv_layers=0,
+ extra=dict(final_conv_kernel=1, ),
+ loss_keypoint=dict(type='JointsMSELoss', use_target_weight=True)),
+ train_cfg=dict(),
+ test_cfg=dict(
+ flip_test=True,
+ post_process='default',
+ shift_heatmap=True,
+ modulate_kernel=11))
+
+data_cfg = dict(
+ image_size=[192, 256],
+ heatmap_size=[48, 64],
+ num_output_channels=channel_cfg['num_output_channels'],
+ num_joints=channel_cfg['dataset_joints'],
+ dataset_channel=channel_cfg['dataset_channel'],
+ inference_channel=channel_cfg['inference_channel'],
+ soft_nms=False,
+ nms_thr=1.0,
+ oks_thr=0.9,
+ vis_thr=0.2,
+ use_gt_bbox=False,
+ det_bbox_thr=0.0,
+ bbox_file='data/coco/person_detection_results/'
+ 'COCO_val2017_detections_AP_H_56_person.json',
+)
+
+train_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='TopDownGetBboxCenterScale', padding=1.25),
+ dict(type='TopDownRandomShiftBboxCenter', shift_factor=0.16, prob=0.3),
+ dict(type='TopDownRandomFlip', flip_prob=0.5),
+ dict(
+ type='TopDownHalfBodyTransform',
+ num_joints_half_body=8,
+ prob_half_body=0.3),
+ dict(
+ type='TopDownGetRandomScaleRotation', rot_factor=40, scale_factor=0.5),
+ dict(type='TopDownAffine'),
+ dict(type='ToTensor'),
+ dict(
+ type='NormalizeTensor',
+ mean=[0.485, 0.456, 0.406],
+ std=[0.229, 0.224, 0.225]),
+ dict(type='TopDownGenerateTarget', sigma=2),
+ dict(
+ type='Collect',
+ keys=['img', 'target', 'target_weight'],
+ meta_keys=[
+ 'image_file', 'joints_3d', 'joints_3d_visible', 'center', 'scale',
+ 'rotation', 'bbox_score', 'flip_pairs'
+ ]),
+]
+
+val_pipeline = [
+ dict(type='LoadImageFromFile'),
+ dict(type='TopDownGetBboxCenterScale', padding=1.25),
+ dict(type='TopDownAffine'),
+ dict(type='ToTensor'),
+ dict(
+ type='NormalizeTensor',
+ mean=[0.485, 0.456, 0.406],
+ std=[0.229, 0.224, 0.225]),
+ dict(
+ type='Collect',
+ keys=['img'],
+ meta_keys=[
+ 'image_file', 'center', 'scale', 'rotation', 'bbox_score',
+ 'flip_pairs'
+ ]),
+]
+
+test_pipeline = val_pipeline
+
+data_root = 'data/coco'
+data = dict(
+ samples_per_gpu=32,
+ workers_per_gpu=2,
+ val_dataloader=dict(samples_per_gpu=32),
+ test_dataloader=dict(samples_per_gpu=32),
+ train=dict(
+ type='TopDownCocoDataset',
+ ann_file=f'{data_root}/annotations/person_keypoints_train2017.json',
+ img_prefix=f'{data_root}/train2017/',
+ data_cfg=data_cfg,
+ pipeline=train_pipeline,
+ dataset_info={{_base_.dataset_info}}),
+ val=dict(
+ type='TopDownCocoDataset',
+ ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
+ img_prefix=f'{data_root}/val2017/',
+ data_cfg=data_cfg,
+ pipeline=val_pipeline,
+ dataset_info={{_base_.dataset_info}}),
+ test=dict(
+ type='TopDownCocoDataset',
+ ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
+ img_prefix=f'{data_root}/val2017/',
+ data_cfg=data_cfg,
+ pipeline=test_pipeline,
+ dataset_info={{_base_.dataset_info}}),
+)
diff --git a/RAVE-main/annotator/mediapipe_face/__init__.py b/RAVE-main/annotator/mediapipe_face/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..f74edfb187e4e39583ed92bfe69ea29c42a34ddc
--- /dev/null
+++ b/RAVE-main/annotator/mediapipe_face/__init__.py
@@ -0,0 +1,5 @@
+from .mediapipe_face_common import generate_annotation
+
+
+def apply_mediapipe_face(image, max_faces: int = 1, min_confidence: float = 0.5):
+ return generate_annotation(image, max_faces, min_confidence)
diff --git a/RAVE-main/annotator/mediapipe_face/mediapipe_face_common.py b/RAVE-main/annotator/mediapipe_face/mediapipe_face_common.py
new file mode 100644
index 0000000000000000000000000000000000000000..0f7d3701dc40eee88977f17a877fa800d0ae328d
--- /dev/null
+++ b/RAVE-main/annotator/mediapipe_face/mediapipe_face_common.py
@@ -0,0 +1,155 @@
+from typing import Mapping
+
+import mediapipe as mp
+import numpy
+
+
+mp_drawing = mp.solutions.drawing_utils
+mp_drawing_styles = mp.solutions.drawing_styles
+mp_face_detection = mp.solutions.face_detection # Only for counting faces.
+mp_face_mesh = mp.solutions.face_mesh
+mp_face_connections = mp.solutions.face_mesh_connections.FACEMESH_TESSELATION
+mp_hand_connections = mp.solutions.hands_connections.HAND_CONNECTIONS
+mp_body_connections = mp.solutions.pose_connections.POSE_CONNECTIONS
+
+DrawingSpec = mp.solutions.drawing_styles.DrawingSpec
+PoseLandmark = mp.solutions.drawing_styles.PoseLandmark
+
+min_face_size_pixels: int = 64
+f_thick = 2
+f_rad = 1
+right_iris_draw = DrawingSpec(color=(10, 200, 250), thickness=f_thick, circle_radius=f_rad)
+right_eye_draw = DrawingSpec(color=(10, 200, 180), thickness=f_thick, circle_radius=f_rad)
+right_eyebrow_draw = DrawingSpec(color=(10, 220, 180), thickness=f_thick, circle_radius=f_rad)
+left_iris_draw = DrawingSpec(color=(250, 200, 10), thickness=f_thick, circle_radius=f_rad)
+left_eye_draw = DrawingSpec(color=(180, 200, 10), thickness=f_thick, circle_radius=f_rad)
+left_eyebrow_draw = DrawingSpec(color=(180, 220, 10), thickness=f_thick, circle_radius=f_rad)
+mouth_draw = DrawingSpec(color=(10, 180, 10), thickness=f_thick, circle_radius=f_rad)
+head_draw = DrawingSpec(color=(10, 200, 10), thickness=f_thick, circle_radius=f_rad)
+
+# mp_face_mesh.FACEMESH_CONTOURS has all the items we care about.
+face_connection_spec = {}
+for edge in mp_face_mesh.FACEMESH_FACE_OVAL:
+ face_connection_spec[edge] = head_draw
+for edge in mp_face_mesh.FACEMESH_LEFT_EYE:
+ face_connection_spec[edge] = left_eye_draw
+for edge in mp_face_mesh.FACEMESH_LEFT_EYEBROW:
+ face_connection_spec[edge] = left_eyebrow_draw
+# for edge in mp_face_mesh.FACEMESH_LEFT_IRIS:
+# face_connection_spec[edge] = left_iris_draw
+for edge in mp_face_mesh.FACEMESH_RIGHT_EYE:
+ face_connection_spec[edge] = right_eye_draw
+for edge in mp_face_mesh.FACEMESH_RIGHT_EYEBROW:
+ face_connection_spec[edge] = right_eyebrow_draw
+# for edge in mp_face_mesh.FACEMESH_RIGHT_IRIS:
+# face_connection_spec[edge] = right_iris_draw
+for edge in mp_face_mesh.FACEMESH_LIPS:
+ face_connection_spec[edge] = mouth_draw
+iris_landmark_spec = {468: right_iris_draw, 473: left_iris_draw}
+
+
+def draw_pupils(image, landmark_list, drawing_spec, halfwidth: int = 2):
+ """We have a custom function to draw the pupils because the mp.draw_landmarks method requires a parameter for all
+ landmarks. Until our PR is merged into mediapipe, we need this separate method."""
+ if len(image.shape) != 3:
+ raise ValueError("Input image must be H,W,C.")
+ image_rows, image_cols, image_channels = image.shape
+ if image_channels != 3: # BGR channels
+ raise ValueError('Input image must contain three channel bgr data.')
+ for idx, landmark in enumerate(landmark_list.landmark):
+ if (
+ (landmark.HasField('visibility') and landmark.visibility < 0.9) or
+ (landmark.HasField('presence') and landmark.presence < 0.5)
+ ):
+ continue
+ if landmark.x >= 1.0 or landmark.x < 0 or landmark.y >= 1.0 or landmark.y < 0:
+ continue
+ image_x = int(image_cols*landmark.x)
+ image_y = int(image_rows*landmark.y)
+ draw_color = None
+ if isinstance(drawing_spec, Mapping):
+ if drawing_spec.get(idx) is None:
+ continue
+ else:
+ draw_color = drawing_spec[idx].color
+ elif isinstance(drawing_spec, DrawingSpec):
+ draw_color = drawing_spec.color
+ image[image_y-halfwidth:image_y+halfwidth, image_x-halfwidth:image_x+halfwidth, :] = draw_color
+
+
+def reverse_channels(image):
+ """Given a numpy array in RGB form, convert to BGR. Will also convert from BGR to RGB."""
+ # im[:,:,::-1] is a neat hack to convert BGR to RGB by reversing the indexing order.
+ # im[:,:,::[2,1,0]] would also work but makes a copy of the data.
+ return image[:, :, ::-1]
+
+
+def generate_annotation(
+ img_rgb,
+ max_faces: int,
+ min_confidence: float
+):
+ """
+ Find up to 'max_faces' inside the provided input image.
+ If min_face_size_pixels is provided and nonzero it will be used to filter faces that occupy less than this many
+ pixels in the image.
+ """
+ with mp_face_mesh.FaceMesh(
+ static_image_mode=True,
+ max_num_faces=max_faces,
+ refine_landmarks=True,
+ min_detection_confidence=min_confidence,
+ ) as facemesh:
+ img_height, img_width, img_channels = img_rgb.shape
+ assert(img_channels == 3)
+
+ results = facemesh.process(img_rgb).multi_face_landmarks
+
+ if results is None:
+ print("No faces detected in controlnet image for Mediapipe face annotator.")
+ return numpy.zeros_like(img_rgb)
+
+ # Filter faces that are too small
+ filtered_landmarks = []
+ for lm in results:
+ landmarks = lm.landmark
+ face_rect = [
+ landmarks[0].x,
+ landmarks[0].y,
+ landmarks[0].x,
+ landmarks[0].y,
+ ] # Left, up, right, down.
+ for i in range(len(landmarks)):
+ face_rect[0] = min(face_rect[0], landmarks[i].x)
+ face_rect[1] = min(face_rect[1], landmarks[i].y)
+ face_rect[2] = max(face_rect[2], landmarks[i].x)
+ face_rect[3] = max(face_rect[3], landmarks[i].y)
+ if min_face_size_pixels > 0:
+ face_width = abs(face_rect[2] - face_rect[0])
+ face_height = abs(face_rect[3] - face_rect[1])
+ face_width_pixels = face_width * img_width
+ face_height_pixels = face_height * img_height
+ face_size = min(face_width_pixels, face_height_pixels)
+ if face_size >= min_face_size_pixels:
+ filtered_landmarks.append(lm)
+ else:
+ filtered_landmarks.append(lm)
+
+ # Annotations are drawn in BGR for some reason, but we don't need to flip a zero-filled image at the start.
+ empty = numpy.zeros_like(img_rgb)
+
+ # Draw detected faces:
+ for face_landmarks in filtered_landmarks:
+ mp_drawing.draw_landmarks(
+ empty,
+ face_landmarks,
+ connections=face_connection_spec.keys(),
+ landmark_drawing_spec=None,
+ connection_drawing_spec=face_connection_spec
+ )
+ draw_pupils(empty, face_landmarks, iris_landmark_spec, 2)
+
+ # Flip BGR back to RGB.
+ empty = reverse_channels(empty).copy()
+
+ return empty
diff --git a/RAVE-main/annotator/zoe/LICENSE b/RAVE-main/annotator/zoe/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..7a1e90d007836c327846ce8e5151013b115042ab
--- /dev/null
+++ b/RAVE-main/annotator/zoe/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2022 Intelligent Systems Lab Org
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/RAVE-main/annotator/zoe/__init__.py b/RAVE-main/annotator/zoe/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..c70d0a77494120411902d759be8ed6db8fe66b50
--- /dev/null
+++ b/RAVE-main/annotator/zoe/__init__.py
@@ -0,0 +1,59 @@
+import os
+import cv2
+import numpy as np
+import torch
+
+from einops import rearrange
+from .zoedepth.models.zoedepth.zoedepth_v1 import ZoeDepth
+from .zoedepth.utils.config import get_config
+from annotator.annotator_path import models_path, DEVICE
+
+
+
+class ZoeDetector:
+ model_dir = os.path.join(models_path, "zoedepth")
+
+ def __init__(self):
+ self.model = None
+ self.device = DEVICE
+
+ def load_model(self):
+ remote_model_path = "https://huggingface.co/lllyasviel/Annotators/resolve/main/ZoeD_M12_N.pt"
+ modelpath = os.path.join(self.model_dir, "ZoeD_M12_N.pt")
+ if not os.path.exists(modelpath):
+ from basicsr.utils.download_util import load_file_from_url
+ load_file_from_url(remote_model_path, model_dir=self.model_dir)
+ conf = get_config("zoedepth", "infer")
+ model = ZoeDepth.build_from_config(conf)
+ model.load_state_dict(torch.load(modelpath, map_location=model.device)['model'])
+ model.eval()
+ self.model = model.to(self.device)
+
+ def unload_model(self):
+ if self.model is not None:
+ self.model.cpu()
+
+ def __call__(self, input_image):
+ if self.model is None:
+ self.load_model()
+ self.model.to(self.device)
+
+ assert input_image.ndim == 3
+ image_depth = input_image
+ with torch.no_grad():
+ image_depth = torch.from_numpy(image_depth).float().to(self.device)
+ image_depth = image_depth / 255.0
+ image_depth = rearrange(image_depth, 'h w c -> 1 c h w')
+ depth = self.model.infer(image_depth)
+
+ depth = depth[0, 0].cpu().numpy()
+
+ vmin = np.percentile(depth, 2)
+ vmax = np.percentile(depth, 85)
+
+ depth -= vmin
+ depth /= vmax - vmin
+ depth = 1.0 - depth
+ depth_image = (depth * 255.0).clip(0, 255).astype(np.uint8)
+
+ return depth_image
diff --git a/RAVE-main/annotator/zoe/zoedepth/models/__pycache__/__init__.cpython-38.pyc b/RAVE-main/annotator/zoe/zoedepth/models/__pycache__/__init__.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f688534ee2cb064e7870d3aa38c61439358cad7d
Binary files /dev/null and b/RAVE-main/annotator/zoe/zoedepth/models/__pycache__/__init__.cpython-38.pyc differ
diff --git a/RAVE-main/annotator/zoe/zoedepth/models/__pycache__/depth_model.cpython-38.pyc b/RAVE-main/annotator/zoe/zoedepth/models/__pycache__/depth_model.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3e774955f26808a7eb17a1101ccc1554aeff1b78
Binary files /dev/null and b/RAVE-main/annotator/zoe/zoedepth/models/__pycache__/depth_model.cpython-38.pyc differ
diff --git a/RAVE-main/annotator/zoe/zoedepth/models/__pycache__/model_io.cpython-38.pyc b/RAVE-main/annotator/zoe/zoedepth/models/__pycache__/model_io.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f5d5168dff5b2df7ca56d2d8554fdeaaadf77206
Binary files /dev/null and b/RAVE-main/annotator/zoe/zoedepth/models/__pycache__/model_io.cpython-38.pyc differ
diff --git a/RAVE-main/annotator/zoe/zoedepth/models/base_models/__pycache__/__init__.cpython-38.pyc b/RAVE-main/annotator/zoe/zoedepth/models/base_models/__pycache__/__init__.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..13ee486e59f01587bf302339542351a5917f7aea
Binary files /dev/null and b/RAVE-main/annotator/zoe/zoedepth/models/base_models/__pycache__/__init__.cpython-38.pyc differ
diff --git a/RAVE-main/annotator/zoe/zoedepth/models/base_models/__pycache__/midas.cpython-38.pyc b/RAVE-main/annotator/zoe/zoedepth/models/base_models/__pycache__/midas.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..639204cd446901669538405eeb51660060bb5dc3
Binary files /dev/null and b/RAVE-main/annotator/zoe/zoedepth/models/base_models/__pycache__/midas.cpython-38.pyc differ
diff --git a/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/__pycache__/hubconf.cpython-38.pyc b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/__pycache__/hubconf.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..05ca4e4971de9421ad7022ac9394a7afb35e2d12
Binary files /dev/null and b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/__pycache__/hubconf.cpython-38.pyc differ
diff --git a/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/__pycache__/base_model.cpython-38.pyc b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/__pycache__/base_model.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c3a4fa02fce8ab185ef6a62c004d42f50dd76bf7
Binary files /dev/null and b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/__pycache__/base_model.cpython-38.pyc differ
diff --git a/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/__pycache__/blocks.cpython-38.pyc b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/__pycache__/blocks.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4280d84dd10d51a89f88c40e7af38eee08775293
Binary files /dev/null and b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/__pycache__/blocks.cpython-38.pyc differ
diff --git a/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/__pycache__/dpt_depth.cpython-38.pyc b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/__pycache__/dpt_depth.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..36546ec52175782b10b485382fed4d7283309818
Binary files /dev/null and b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/__pycache__/dpt_depth.cpython-38.pyc differ
diff --git a/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/__pycache__/midas_net.cpython-38.pyc b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/__pycache__/midas_net.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ed87ff2c773128b231035132240bfd2a2145ea5f
Binary files /dev/null and b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/__pycache__/midas_net.cpython-38.pyc differ
diff --git a/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/__pycache__/midas_net_custom.cpython-38.pyc b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/__pycache__/midas_net_custom.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b1532dcf8a8091fa5cfbaca906c1d74910ee4af2
Binary files /dev/null and b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/__pycache__/midas_net_custom.cpython-38.pyc differ
diff --git a/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/__pycache__/beit.cpython-38.pyc b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/__pycache__/beit.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c66fd5ea77177f90d3661cc480af36138ce4e060
Binary files /dev/null and b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/__pycache__/beit.cpython-38.pyc differ
diff --git a/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/__pycache__/levit.cpython-38.pyc b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/__pycache__/levit.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..be777fa2343e62620ade378c2781eee7a3a15bc2
Binary files /dev/null and b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/__pycache__/levit.cpython-38.pyc differ
diff --git a/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/__pycache__/swin.cpython-38.pyc b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/__pycache__/swin.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b9d1c4afb2b321255e2455ab55e8c1b1ec67252a
Binary files /dev/null and b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/__pycache__/swin.cpython-38.pyc differ
diff --git a/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/__pycache__/swin2.cpython-38.pyc b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/__pycache__/swin2.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..370c7a6b44eb85da29dfb5e679445e7b72c1240e
Binary files /dev/null and b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/__pycache__/swin2.cpython-38.pyc differ
diff --git a/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/__pycache__/swin_common.cpython-38.pyc b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/__pycache__/swin_common.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bef82f7d81b80816faed57a1624cada12c97a445
Binary files /dev/null and b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/__pycache__/swin_common.cpython-38.pyc differ
diff --git a/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/__pycache__/utils.cpython-38.pyc b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/__pycache__/utils.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3e66e74d612932aca06e03d8a67debbb9afc8cae
Binary files /dev/null and b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/__pycache__/utils.cpython-38.pyc differ
diff --git a/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/__pycache__/vit.cpython-38.pyc b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/__pycache__/vit.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e7b298f8db5f0bb5ed30438e65013878bcbfe294
Binary files /dev/null and b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/__pycache__/vit.cpython-38.pyc differ
diff --git a/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/beit.py b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/beit.py
new file mode 100644
index 0000000000000000000000000000000000000000..ef8c68186339bc7a184df6f667ba1e81396ee4b5
--- /dev/null
+++ b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/beit.py
@@ -0,0 +1,198 @@
+import timm
+import torch
+import types
+
+import numpy as np
+import torch.nn.functional as F
+
+from .utils import forward_adapted_unflatten, make_backbone_default
+from timm.models.beit import gen_relative_position_index
+from torch.utils.checkpoint import checkpoint
+from typing import Optional
+
+
+def forward_beit(pretrained, x):
+    # BEiT entry point: delegate to the shared hook-based extractor, invoking
+    # the model's (patched) "forward_features" method.
+    return forward_adapted_unflatten(pretrained, x, "forward_features")
+
+
+def patch_embed_forward(self, x):
+    """
+    Modification of timm.models.layers.patch_embed.py: PatchEmbed.forward to support arbitrary window sizes.
+    """
+    # Same as upstream minus the fixed input-size check, so any resolution
+    # compatible with the projection stride is accepted.
+    x = self.proj(x)
+    if self.flatten:
+        x = x.flatten(2).transpose(1, 2)  # BCHW -> B,N,C token layout
+    x = self.norm(x)
+    return x
+
+
+def _get_rel_pos_bias(self, window_size):
+    """
+    Modification of timm.models.beit.py: Attention._get_rel_pos_bias to support arbitrary window sizes.
+    """
+    # Extents of the pretrained relative-position bias table.
+    old_height = 2 * self.window_size[0] - 1
+    old_width = 2 * self.window_size[1] - 1
+
+    # Extents required for the requested window.
+    new_height = 2 * window_size[0] - 1
+    new_width = 2 * window_size[1] - 1
+
+    old_relative_position_bias_table = self.relative_position_bias_table
+
+    old_num_relative_distance = self.num_relative_distance
+    # The trailing 3 table entries are non-spatial (cls-token related in BEiT);
+    # they are kept as-is and only the spatial part is resampled.
+    new_num_relative_distance = new_height * new_width + 3
+
+    old_sub_table = old_relative_position_bias_table[:old_num_relative_distance - 3]
+
+    # Bilinearly interpolate the spatial bias table to the new window extents.
+    old_sub_table = old_sub_table.reshape(1, old_width, old_height, -1).permute(0, 3, 1, 2)
+    new_sub_table = F.interpolate(old_sub_table, size=(new_height, new_width), mode="bilinear")
+    new_sub_table = new_sub_table.permute(0, 2, 3, 1).reshape(new_num_relative_distance - 3, -1)
+
+    new_relative_position_bias_table = torch.cat(
+        [new_sub_table, old_relative_position_bias_table[old_num_relative_distance - 3:]])
+
+    # Cache the index tensor per window size; gen_relative_position_index is
+    # recomputed only on first sight of a given window.
+    key = str(window_size[1]) + "," + str(window_size[0])
+    if key not in self.relative_position_indices.keys():
+        self.relative_position_indices[key] = gen_relative_position_index(window_size)
+
+    relative_position_bias = new_relative_position_bias_table[
+        self.relative_position_indices[key].view(-1)].view(
+        window_size[0] * window_size[1] + 1,
+        window_size[0] * window_size[1] + 1, -1) # Wh*Ww,Wh*Ww,nH
+    relative_position_bias = relative_position_bias.permute(2, 0, 1).contiguous() # nH, Wh*Ww, Wh*Ww
+    return relative_position_bias.unsqueeze(0)
+
+
+def attention_forward(self, x, resolution, shared_rel_pos_bias: Optional[torch.Tensor] = None):
+    """
+    Modification of timm.models.beit.py: Attention.forward to support arbitrary window sizes.
+    """
+    B, N, C = x.shape
+
+    # Assemble the qkv bias manually (BEiT keeps q/k/v biases separate).
+    qkv_bias = torch.cat((self.q_bias, self.k_bias, self.v_bias)) if self.q_bias is not None else None
+    qkv = F.linear(input=x, weight=self.qkv.weight, bias=qkv_bias)
+    qkv = qkv.reshape(B, N, 3, self.num_heads, -1).permute(2, 0, 3, 1, 4)
+    q, k, v = qkv.unbind(0)  # make torchscript happy (cannot use tensor as tuple)
+
+    q = q * self.scale
+    attn = (q @ k.transpose(-2, -1))
+
+    if self.relative_position_bias_table is not None:
+        # Window size derived from the input resolution; 16 is the patch size
+        # used by these BEiT backbones.
+        window_size = tuple(np.array(resolution) // 16)
+        attn = attn + self._get_rel_pos_bias(window_size)
+    if shared_rel_pos_bias is not None:
+        attn = attn + shared_rel_pos_bias
+
+    attn = attn.softmax(dim=-1)
+    attn = self.attn_drop(attn)
+
+    x = (attn @ v).transpose(1, 2).reshape(B, N, -1)
+    x = self.proj(x)
+    x = self.proj_drop(x)
+    return x
+
+
+def block_forward(self, x, resolution, shared_rel_pos_bias: Optional[torch.Tensor] = None):
+    """
+    Modification of timm.models.beit.py: Block.forward to support arbitrary window sizes.
+    """
+    # Newer timm renamed drop_path -> drop_path1; alias it for compatibility.
+    if hasattr(self, 'drop_path1') and not hasattr(self, 'drop_path'):
+        self.drop_path = self.drop_path1
+    if self.gamma_1 is None:
+        x = x + self.drop_path(self.attn(self.norm1(x), resolution, shared_rel_pos_bias=shared_rel_pos_bias))
+        x = x + self.drop_path(self.mlp(self.norm2(x)))
+    else:
+        # Layer-scale variant: gamma_1/gamma_2 rescale each residual branch.
+        x = x + self.drop_path(self.gamma_1 * self.attn(self.norm1(x), resolution,
+                                                        shared_rel_pos_bias=shared_rel_pos_bias))
+        x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x)))
+    return x
+
+
+def beit_forward_features(self, x):
+ """
+ Modification of timm.models.beit.py: Beit.forward_features to support arbitrary window sizes.
+ """
+ resolution = x.shape[2:]
+
+ x = self.patch_embed(x)
+ x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1)
+ if self.pos_embed is not None:
+ x = x + self.pos_embed
+ x = self.pos_drop(x)
+
+ rel_pos_bias = self.rel_pos_bias() if self.rel_pos_bias is not None else None
+ for blk in self.blocks:
+ if self.grad_checkpointing and not torch.jit.is_scripting():
+ x = checkpoint(blk, x, shared_rel_pos_bias=rel_pos_bias)
+ else:
+ x = blk(x, resolution, shared_rel_pos_bias=rel_pos_bias)
+ x = self.norm(x)
+ return x
+
+
+def _make_beit_backbone(
+    model,
+    features=[96, 192, 384, 768],
+    size=[384, 384],
+    hooks=[0, 4, 8, 11],
+    vit_features=768,
+    use_readout="ignore",
+    start_index=1,
+    start_index_readout=1,
+):
+    # Build the generic hooked backbone, then monkey-patch the BEiT-specific
+    # forward methods so arbitrary window sizes are supported.
+    backbone = make_backbone_default(model, features, size, hooks, vit_features, use_readout, start_index,
+                                     start_index_readout)
+
+    backbone.model.patch_embed.forward = types.MethodType(patch_embed_forward, backbone.model.patch_embed)
+    backbone.model.forward_features = types.MethodType(beit_forward_features, backbone.model)
+
+    for block in backbone.model.blocks:
+        attn = block.attn
+        attn._get_rel_pos_bias = types.MethodType(_get_rel_pos_bias, attn)
+        attn.forward = types.MethodType(attention_forward, attn)
+        # Per-attention cache of relative-position index tensors, keyed by window size.
+        attn.relative_position_indices = {}
+
+        block.forward = types.MethodType(block_forward, block)
+
+    return backbone
+
+
+def _make_pretrained_beitl16_512(pretrained, use_readout="ignore", hooks=None):
+    """BEiT-L/16 @ 512 backbone; hooks default to [5, 11, 17, 23]."""
+    model = timm.create_model("beit_large_patch16_512", pretrained=pretrained)
+
+    hooks = [5, 11, 17, 23] if hooks is None else hooks
+
+    features = [256, 512, 1024, 1024]
+
+    return _make_beit_backbone(
+        model,
+        features=features,
+        size=[512, 512],
+        hooks=hooks,
+        vit_features=1024,
+        use_readout=use_readout,
+    )
+
+
+def _make_pretrained_beitl16_384(pretrained, use_readout="ignore", hooks=None):
+    """BEiT-L/16 @ 384 backbone; hooks default to [5, 11, 17, 23]."""
+    model = timm.create_model("beit_large_patch16_384", pretrained=pretrained)
+
+    hooks = [5, 11, 17, 23] if hooks is None else hooks
+    return _make_beit_backbone(
+        model,
+        features=[256, 512, 1024, 1024],
+        hooks=hooks,
+        vit_features=1024,
+        use_readout=use_readout,
+    )
+
+
+def _make_pretrained_beitb16_384(pretrained, use_readout="ignore", hooks=None):
+    """BEiT-B/16 @ 384 backbone; hooks default to [2, 5, 8, 11]."""
+    model = timm.create_model("beit_base_patch16_384", pretrained=pretrained)
+
+    hooks = [2, 5, 8, 11] if hooks is None else hooks
+    return _make_beit_backbone(
+        model,
+        features=[96, 192, 384, 768],
+        hooks=hooks,
+        use_readout=use_readout,
+    )
diff --git a/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/levit.py b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/levit.py
new file mode 100644
index 0000000000000000000000000000000000000000..e6a7770fb76f085a0f3b6015902797c5805bba01
--- /dev/null
+++ b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/levit.py
@@ -0,0 +1,106 @@
+import timm
+import torch
+import torch.nn as nn
+import numpy as np
+
+from .utils import activations, get_activation, Transpose
+
+
+def forward_levit(pretrained, x):
+    # LeViT exposes only three hooked stages (unlike the 4-stage backbones).
+    # forward_features is called for its hook side effects only.
+    pretrained.model.forward_features(x)
+
+    layer_1 = pretrained.activations["1"]
+    layer_2 = pretrained.activations["2"]
+    layer_3 = pretrained.activations["3"]
+
+    layer_1 = pretrained.act_postprocess1(layer_1)
+    layer_2 = pretrained.act_postprocess2(layer_2)
+    layer_3 = pretrained.act_postprocess3(layer_3)
+
+    return layer_1, layer_2, layer_3
+
+
+def _make_levit_backbone(
+    model,
+    hooks=[3, 11, 21],
+    patch_grid=[14, 14]
+):
+    # Wrap the timm LeViT model and register hooks on the three tap points.
+    pretrained = nn.Module()
+
+    pretrained.model = model
+    pretrained.model.blocks[hooks[0]].register_forward_hook(get_activation("1"))
+    pretrained.model.blocks[hooks[1]].register_forward_hook(get_activation("2"))
+    pretrained.model.blocks[hooks[2]].register_forward_hook(get_activation("3"))
+
+    pretrained.activations = activations
+
+    patch_grid_size = np.array(patch_grid, dtype=int)
+
+    # Token sequences -> 2-D maps; later stages use successively smaller grids
+    # (halved, then quartered — ceil to tolerate odd sizes).
+    pretrained.act_postprocess1 = nn.Sequential(
+        Transpose(1, 2),
+        nn.Unflatten(2, torch.Size(patch_grid_size.tolist()))
+    )
+    pretrained.act_postprocess2 = nn.Sequential(
+        Transpose(1, 2),
+        nn.Unflatten(2, torch.Size((np.ceil(patch_grid_size / 2).astype(int)).tolist()))
+    )
+    pretrained.act_postprocess3 = nn.Sequential(
+        Transpose(1, 2),
+        nn.Unflatten(2, torch.Size((np.ceil(patch_grid_size / 4).astype(int)).tolist()))
+    )
+
+    return pretrained
+
+
+class ConvTransposeNorm(nn.Sequential):
+    """
+    Modification of
+    https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/levit.py: ConvNorm
+    such that ConvTranspose2d is used instead of Conv2d.
+    """
+
+    def __init__(
+            self, in_chs, out_chs, kernel_size=1, stride=1, pad=0, dilation=1,
+            groups=1, bn_weight_init=1):
+        super().__init__()
+        # 'c' (conv) and 'bn' names must match upstream so fuse() can find them.
+        self.add_module('c',
+                        nn.ConvTranspose2d(in_chs, out_chs, kernel_size, stride, pad, dilation, groups, bias=False))
+        self.add_module('bn', nn.BatchNorm2d(out_chs))
+
+        nn.init.constant_(self.bn.weight, bn_weight_init)
+
+    @torch.no_grad()
+    def fuse(self):
+        # Fold the BatchNorm into the transposed conv for inference:
+        # returns a single ConvTranspose2d with adjusted weight and bias.
+        c, bn = self._modules.values()
+        w = bn.weight / (bn.running_var + bn.eps) ** 0.5
+        w = c.weight * w[:, None, None, None]
+        b = bn.bias - bn.running_mean * bn.weight / (bn.running_var + bn.eps) ** 0.5
+        m = nn.ConvTranspose2d(
+            w.size(1), w.size(0), w.shape[2:], stride=self.c.stride,
+            padding=self.c.padding, dilation=self.c.dilation, groups=self.c.groups)
+        m.weight.data.copy_(w)
+        m.bias.data.copy_(b)
+        return m
+
+
+def stem_b4_transpose(in_chs, out_chs, activation):
+    """
+    Modification of
+    https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/levit.py: stem_b16
+    such that ConvTranspose2d is used instead of Conv2d and stem is also reduced to the half.
+    """
+    # Two stride-2 transposed convs: upsamples 4x overall, halving channels
+    # in the second stage.
+    return nn.Sequential(
+        ConvTransposeNorm(in_chs, out_chs, 3, 2, 1),
+        activation(),
+        ConvTransposeNorm(out_chs, out_chs // 2, 3, 2, 1),
+        activation())
+
+
+def _make_pretrained_levit_384(pretrained, hooks=None):
+ model = timm.create_model("levit_384", pretrained=pretrained)
+
+ hooks = [3, 11, 21] if hooks == None else hooks
+ return _make_levit_backbone(
+ model,
+ hooks=hooks
+ )
diff --git a/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/next_vit.py b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/next_vit.py
new file mode 100644
index 0000000000000000000000000000000000000000..773acfb1f67537edf56f4f67b686d8f9b174658d
--- /dev/null
+++ b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/next_vit.py
@@ -0,0 +1,39 @@
+import timm
+
+import torch.nn as nn
+
+from pathlib import Path
+from .utils import activations, forward_default, get_activation
+
+from ..external.next_vit.classification.nextvit import *
+
+
+def forward_next_vit(pretrained, x):
+    # Next-ViT taps its features via model.forward (not forward_features).
+    return forward_default(pretrained, x, "forward")
+
+
+def _make_next_vit_backbone(
+    model,
+    hooks=[2, 6, 36, 39],
+):
+    # Register hooks on four entries of model.features; no act_postprocess
+    # stages are needed (forward_default skips them when absent).
+    pretrained = nn.Module()
+
+    pretrained.model = model
+    pretrained.model.features[hooks[0]].register_forward_hook(get_activation("1"))
+    pretrained.model.features[hooks[1]].register_forward_hook(get_activation("2"))
+    pretrained.model.features[hooks[2]].register_forward_hook(get_activation("3"))
+    pretrained.model.features[hooks[3]].register_forward_hook(get_activation("4"))
+
+    pretrained.activations = activations
+
+    return pretrained
+
+
+def _make_pretrained_next_vit_large_6m(hooks=None):
+ model = timm.create_model("nextvit_large")
+
+ hooks = [2, 6, 36, 39] if hooks == None else hooks
+ return _make_next_vit_backbone(
+ model,
+ hooks=hooks,
+ )
diff --git a/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/swin.py b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/swin.py
new file mode 100644
index 0000000000000000000000000000000000000000..7477c70cb0109c875ce58ba9b2dbd42e12b2cbe1
--- /dev/null
+++ b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/swin.py
@@ -0,0 +1,13 @@
+import timm
+
+from .swin_common import _make_swin_backbone
+
+
+def _make_pretrained_swinl12_384(pretrained, hooks=None):
+ model = timm.create_model("swin_large_patch4_window12_384", pretrained=pretrained)
+
+ hooks = [1, 1, 17, 1] if hooks == None else hooks
+ return _make_swin_backbone(
+ model,
+ hooks=hooks
+ )
diff --git a/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/swin2.py b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/swin2.py
new file mode 100644
index 0000000000000000000000000000000000000000..9b0ab3ccb244fc2d71cb01b96a2d6f0544a041ca
--- /dev/null
+++ b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/swin2.py
@@ -0,0 +1,34 @@
+import timm
+
+from .swin_common import _make_swin_backbone
+
+
+def _make_pretrained_swin2l24_384(pretrained, hooks=None):
+ model = timm.create_model("swinv2_large_window12to24_192to384_22kft1k", pretrained=pretrained)
+
+ hooks = [1, 1, 17, 1] if hooks == None else hooks
+ return _make_swin_backbone(
+ model,
+ hooks=hooks
+ )
+
+
+def _make_pretrained_swin2b24_384(pretrained, hooks=None):
+ model = timm.create_model("swinv2_base_window12to24_192to384_22kft1k", pretrained=pretrained)
+
+ hooks = [1, 1, 17, 1] if hooks == None else hooks
+ return _make_swin_backbone(
+ model,
+ hooks=hooks
+ )
+
+
+def _make_pretrained_swin2t16_256(pretrained, hooks=None):
+ model = timm.create_model("swinv2_tiny_window16_256", pretrained=pretrained)
+
+ hooks = [1, 1, 5, 1] if hooks == None else hooks
+ return _make_swin_backbone(
+ model,
+ hooks=hooks,
+ patch_grid=[64, 64]
+ )
diff --git a/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/swin_common.py b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/swin_common.py
new file mode 100644
index 0000000000000000000000000000000000000000..e61c3633fafcd496505ce209f8152f0aeee0572c
--- /dev/null
+++ b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/swin_common.py
@@ -0,0 +1,52 @@
+import torch
+
+import torch.nn as nn
+import numpy as np
+
+from .utils import activations, forward_default, get_activation, Transpose
+
+
+def forward_swin(pretrained, x):
+    # Swin uses the default hooked extraction (forward_features).
+    return forward_default(pretrained, x)
+
+
+def _make_swin_backbone(
+        model,
+        hooks=[1, 1, 17, 1],
+        patch_grid=[96, 96]
+):
+    # Wrap a timm Swin model; hooks[i] indexes a block within stage i.
+    pretrained = nn.Module()
+
+    pretrained.model = model
+    pretrained.model.layers[0].blocks[hooks[0]].register_forward_hook(get_activation("1"))
+    pretrained.model.layers[1].blocks[hooks[1]].register_forward_hook(get_activation("2"))
+    pretrained.model.layers[2].blocks[hooks[2]].register_forward_hook(get_activation("3"))
+    pretrained.model.layers[3].blocks[hooks[3]].register_forward_hook(get_activation("4"))
+
+    pretrained.activations = activations
+
+    # Prefer the model's own patch grid when available; fall back to the arg.
+    if hasattr(model, "patch_grid"):
+        used_patch_grid = model.patch_grid
+    else:
+        used_patch_grid = patch_grid
+
+    patch_grid_size = np.array(used_patch_grid, dtype=int)
+
+    # Token sequences -> 2-D maps; each successive stage halves the grid.
+    pretrained.act_postprocess1 = nn.Sequential(
+        Transpose(1, 2),
+        nn.Unflatten(2, torch.Size(patch_grid_size.tolist()))
+    )
+    pretrained.act_postprocess2 = nn.Sequential(
+        Transpose(1, 2),
+        nn.Unflatten(2, torch.Size((patch_grid_size // 2).tolist()))
+    )
+    pretrained.act_postprocess3 = nn.Sequential(
+        Transpose(1, 2),
+        nn.Unflatten(2, torch.Size((patch_grid_size // 4).tolist()))
+    )
+    pretrained.act_postprocess4 = nn.Sequential(
+        Transpose(1, 2),
+        nn.Unflatten(2, torch.Size((patch_grid_size // 8).tolist()))
+    )
+
+    return pretrained
diff --git a/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/utils.py b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..b334c3c55d5ee1da23d481de8b7555c5e9093754
--- /dev/null
+++ b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/utils.py
@@ -0,0 +1,249 @@
+import torch
+
+import torch.nn as nn
+
+
+class Slice(nn.Module):
+    """Drop the first start_index tokens (e.g. the cls/readout token)."""
+
+    def __init__(self, start_index=1):
+        super(Slice, self).__init__()
+        self.start_index = start_index
+
+    def forward(self, x):
+        return x[:, self.start_index:]
+
+
+class AddReadout(nn.Module):
+    """Fold the readout token(s) into the patch tokens by addition."""
+
+    def __init__(self, start_index=1):
+        super(AddReadout, self).__init__()
+        self.start_index = start_index
+
+    def forward(self, x):
+        # start_index == 2 means two leading tokens (cls + dist); average them.
+        if self.start_index == 2:
+            readout = (x[:, 0] + x[:, 1]) / 2
+        else:
+            readout = x[:, 0]
+        return x[:, self.start_index:] + readout.unsqueeze(1)
+
+
+class ProjectReadout(nn.Module):
+    """Concatenate the readout token to every patch token and project back
+    to in_features via a learned Linear + GELU."""
+
+    def __init__(self, in_features, start_index=1):
+        super(ProjectReadout, self).__init__()
+        self.start_index = start_index
+
+        self.project = nn.Sequential(nn.Linear(2 * in_features, in_features), nn.GELU())
+
+    def forward(self, x):
+        readout = x[:, 0].unsqueeze(1).expand_as(x[:, self.start_index:])
+        features = torch.cat((x[:, self.start_index:], readout), -1)
+
+        return self.project(features)
+
+
+class Transpose(nn.Module):
+    """nn.Module wrapper around Tensor.transpose, usable inside nn.Sequential."""
+
+    def __init__(self, dim0, dim1):
+        super(Transpose, self).__init__()
+        self.dim0 = dim0
+        self.dim1 = dim1
+
+    def forward(self, x):
+        x = x.transpose(self.dim0, self.dim1)
+        return x
+
+
+activations = {}
+
+
+def get_activation(name):
+    # Forward-hook factory: stores the module output in the module-level
+    # `activations` dict under `name` (shared by all backbones in this package).
+    def hook(model, input, output):
+        activations[name] = output
+
+    return hook
+
+
+def forward_default(pretrained, x, function_name="forward_features"):
+ exec(f"pretrained.model.{function_name}(x)")
+
+ layer_1 = pretrained.activations["1"]
+ layer_2 = pretrained.activations["2"]
+ layer_3 = pretrained.activations["3"]
+ layer_4 = pretrained.activations["4"]
+
+ if hasattr(pretrained, "act_postprocess1"):
+ layer_1 = pretrained.act_postprocess1(layer_1)
+ if hasattr(pretrained, "act_postprocess2"):
+ layer_2 = pretrained.act_postprocess2(layer_2)
+ if hasattr(pretrained, "act_postprocess3"):
+ layer_3 = pretrained.act_postprocess3(layer_3)
+ if hasattr(pretrained, "act_postprocess4"):
+ layer_4 = pretrained.act_postprocess4(layer_4)
+
+ return layer_1, layer_2, layer_3, layer_4
+
+
+def forward_adapted_unflatten(pretrained, x, function_name="forward_features"):
+ b, c, h, w = x.shape
+
+ exec(f"glob = pretrained.model.{function_name}(x)")
+
+ layer_1 = pretrained.activations["1"]
+ layer_2 = pretrained.activations["2"]
+ layer_3 = pretrained.activations["3"]
+ layer_4 = pretrained.activations["4"]
+
+ layer_1 = pretrained.act_postprocess1[0:2](layer_1)
+ layer_2 = pretrained.act_postprocess2[0:2](layer_2)
+ layer_3 = pretrained.act_postprocess3[0:2](layer_3)
+ layer_4 = pretrained.act_postprocess4[0:2](layer_4)
+
+ unflatten = nn.Sequential(
+ nn.Unflatten(
+ 2,
+ torch.Size(
+ [
+ h // pretrained.model.patch_size[1],
+ w // pretrained.model.patch_size[0],
+ ]
+ ),
+ )
+ )
+
+ if layer_1.ndim == 3:
+ layer_1 = unflatten(layer_1)
+ if layer_2.ndim == 3:
+ layer_2 = unflatten(layer_2)
+ if layer_3.ndim == 3:
+ layer_3 = unflatten(layer_3)
+ if layer_4.ndim == 3:
+ layer_4 = unflatten(layer_4)
+
+ layer_1 = pretrained.act_postprocess1[3: len(pretrained.act_postprocess1)](layer_1)
+ layer_2 = pretrained.act_postprocess2[3: len(pretrained.act_postprocess2)](layer_2)
+ layer_3 = pretrained.act_postprocess3[3: len(pretrained.act_postprocess3)](layer_3)
+ layer_4 = pretrained.act_postprocess4[3: len(pretrained.act_postprocess4)](layer_4)
+
+ return layer_1, layer_2, layer_3, layer_4
+
+
+def get_readout_oper(vit_features, features, use_readout, start_index=1):
+ if use_readout == "ignore":
+ readout_oper = [Slice(start_index)] * len(features)
+ elif use_readout == "add":
+ readout_oper = [AddReadout(start_index)] * len(features)
+ elif use_readout == "project":
+ readout_oper = [
+ ProjectReadout(vit_features, start_index) for out_feat in features
+ ]
+ else:
+ assert (
+ False
+ ), "wrong operation for readout token, use_readout can be 'ignore', 'add', or 'project'"
+
+ return readout_oper
+
+
+def make_backbone_default(
+        model,
+        features=[96, 192, 384, 768],
+        size=[384, 384],
+        hooks=[2, 5, 8, 11],
+        vit_features=768,
+        use_readout="ignore",
+        start_index=1,
+        start_index_readout=1,
+):
+    # Generic hooked ViT backbone: register hooks on 4 transformer blocks and
+    # build act_postprocess stages (readout, transpose, unflatten, convs) that
+    # turn token sequences into multi-scale 2-D feature maps.
+    pretrained = nn.Module()
+
+    pretrained.model = model
+    pretrained.model.blocks[hooks[0]].register_forward_hook(get_activation("1"))
+    pretrained.model.blocks[hooks[1]].register_forward_hook(get_activation("2"))
+    pretrained.model.blocks[hooks[2]].register_forward_hook(get_activation("3"))
+    pretrained.model.blocks[hooks[3]].register_forward_hook(get_activation("4"))
+
+    pretrained.activations = activations
+
+    readout_oper = get_readout_oper(vit_features, features, use_readout, start_index_readout)
+
+    # 32, 48, 136, 384
+    # Stage 1: 1x1 conv to features[0], then 4x upsample (ConvTranspose).
+    pretrained.act_postprocess1 = nn.Sequential(
+        readout_oper[0],
+        Transpose(1, 2),
+        nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
+        nn.Conv2d(
+            in_channels=vit_features,
+            out_channels=features[0],
+            kernel_size=1,
+            stride=1,
+            padding=0,
+        ),
+        nn.ConvTranspose2d(
+            in_channels=features[0],
+            out_channels=features[0],
+            kernel_size=4,
+            stride=4,
+            padding=0,
+            bias=True,
+            dilation=1,
+            groups=1,
+        ),
+    )
+
+    # Stage 2: 1x1 conv then 2x upsample.
+    pretrained.act_postprocess2 = nn.Sequential(
+        readout_oper[1],
+        Transpose(1, 2),
+        nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
+        nn.Conv2d(
+            in_channels=vit_features,
+            out_channels=features[1],
+            kernel_size=1,
+            stride=1,
+            padding=0,
+        ),
+        nn.ConvTranspose2d(
+            in_channels=features[1],
+            out_channels=features[1],
+            kernel_size=2,
+            stride=2,
+            padding=0,
+            bias=True,
+            dilation=1,
+            groups=1,
+        ),
+    )
+
+    # Stage 3: 1x1 conv only (keeps the native token-grid resolution).
+    pretrained.act_postprocess3 = nn.Sequential(
+        readout_oper[2],
+        Transpose(1, 2),
+        nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
+        nn.Conv2d(
+            in_channels=vit_features,
+            out_channels=features[2],
+            kernel_size=1,
+            stride=1,
+            padding=0,
+        ),
+    )
+
+    # Stage 4: 1x1 conv then stride-2 conv (2x downsample).
+    pretrained.act_postprocess4 = nn.Sequential(
+        readout_oper[3],
+        Transpose(1, 2),
+        nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
+        nn.Conv2d(
+            in_channels=vit_features,
+            out_channels=features[3],
+            kernel_size=1,
+            stride=1,
+            padding=0,
+        ),
+        nn.Conv2d(
+            in_channels=features[3],
+            out_channels=features[3],
+            kernel_size=3,
+            stride=2,
+            padding=1,
+        ),
+    )
+
+    # Attributes consumed by forward_adapted_unflatten for dynamic unflatten.
+    pretrained.model.start_index = start_index
+    pretrained.model.patch_size = [16, 16]
+
+    return pretrained
diff --git a/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/vit.py b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/vit.py
new file mode 100644
index 0000000000000000000000000000000000000000..27e103c6e383da4fa6ea10e2ec5e19b486be0193
--- /dev/null
+++ b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/vit.py
@@ -0,0 +1,221 @@
+import torch
+import torch.nn as nn
+import timm
+import types
+import math
+import torch.nn.functional as F
+
+from .utils import (activations, forward_adapted_unflatten, get_activation, get_readout_oper,
+ make_backbone_default, Transpose)
+
+
+def forward_vit(pretrained, x):
+    # ViT entry point: use the injected "forward_flex" which supports
+    # arbitrary input resolutions via position-embedding interpolation.
+    return forward_adapted_unflatten(pretrained, x, "forward_flex")
+
+
+def _resize_pos_embed(self, posemb, gs_h, gs_w):
+    # Split leading special tokens (cls, optionally dist) from the grid part.
+    posemb_tok, posemb_grid = (
+        posemb[:, : self.start_index],
+        posemb[0, self.start_index:],
+    )
+
+    # Pretrained grid is assumed square; recover its side length.
+    gs_old = int(math.sqrt(len(posemb_grid)))
+
+    # Bilinearly resample the grid embeddings to (gs_h, gs_w).
+    posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2)
+    posemb_grid = F.interpolate(posemb_grid, size=(gs_h, gs_w), mode="bilinear")
+    posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_h * gs_w, -1)
+
+    posemb = torch.cat([posemb_tok, posemb_grid], dim=1)
+
+    return posemb
+
+
+def forward_flex(self, x):
+    # Resolution-flexible replacement for VisionTransformer.forward_features:
+    # interpolates position embeddings to match the input's token grid.
+    b, c, h, w = x.shape
+
+    pos_embed = self._resize_pos_embed(
+        self.pos_embed, h // self.patch_size[1], w // self.patch_size[0]
+    )
+
+    B = x.shape[0]
+
+    # Hybrid models (e.g. ViT-ResNet50) run a CNN backbone before patch proj.
+    if hasattr(self.patch_embed, "backbone"):
+        x = self.patch_embed.backbone(x)
+        if isinstance(x, (list, tuple)):
+            x = x[-1]  # last feature if backbone outputs list/tuple of features
+
+    x = self.patch_embed.proj(x).flatten(2).transpose(1, 2)
+
+    if getattr(self, "dist_token", None) is not None:
+        cls_tokens = self.cls_token.expand(
+            B, -1, -1
+        )  # stole cls_tokens impl from Phil Wang, thanks
+        dist_token = self.dist_token.expand(B, -1, -1)
+        x = torch.cat((cls_tokens, dist_token, x), dim=1)
+    else:
+        # no_embed_class: position embeddings apply to patch tokens only,
+        # so add them before prepending the cls token.
+        if self.no_embed_class:
+            x = x + pos_embed
+        cls_tokens = self.cls_token.expand(
+            B, -1, -1
+        )  # stole cls_tokens impl from Phil Wang, thanks
+        x = torch.cat((cls_tokens, x), dim=1)
+
+    if not self.no_embed_class:
+        x = x + pos_embed
+    x = self.pos_drop(x)
+
+    for blk in self.blocks:
+        x = blk(x)
+
+    x = self.norm(x)
+
+    return x
+
+
+def _make_vit_b16_backbone(
+    model,
+    features=[96, 192, 384, 768],
+    size=[384, 384],
+    hooks=[2, 5, 8, 11],
+    vit_features=768,
+    use_readout="ignore",
+    start_index=1,
+    start_index_readout=1,
+):
+    # Default hooked backbone plus the resolution-flexible forward methods.
+    pretrained = make_backbone_default(model, features, size, hooks, vit_features, use_readout, start_index,
+                                       start_index_readout)
+
+    # We inject this function into the VisionTransformer instances so that
+    # we can use it with interpolated position embeddings without modifying the library source.
+    pretrained.model.forward_flex = types.MethodType(forward_flex, pretrained.model)
+    pretrained.model._resize_pos_embed = types.MethodType(
+        _resize_pos_embed, pretrained.model
+    )
+
+    return pretrained
+
+
+def _make_pretrained_vitl16_384(pretrained, use_readout="ignore", hooks=None):
+ model = timm.create_model("vit_large_patch16_384", pretrained=pretrained)
+
+ hooks = [5, 11, 17, 23] if hooks == None else hooks
+ return _make_vit_b16_backbone(
+ model,
+ features=[256, 512, 1024, 1024],
+ hooks=hooks,
+ vit_features=1024,
+ use_readout=use_readout,
+ )
+
+
+def _make_pretrained_vitb16_384(pretrained, use_readout="ignore", hooks=None):
+ model = timm.create_model("vit_base_patch16_384", pretrained=pretrained)
+
+ hooks = [2, 5, 8, 11] if hooks == None else hooks
+ return _make_vit_b16_backbone(
+ model, features=[96, 192, 384, 768], hooks=hooks, use_readout=use_readout
+ )
+
+
+def _make_vit_b_rn50_backbone(
+ model,
+ features=[256, 512, 768, 768],
+ size=[384, 384],
+ hooks=[0, 1, 8, 11],
+ vit_features=768,
+ patch_size=[16, 16],
+ number_stages=2,
+ use_vit_only=False,
+ use_readout="ignore",
+ start_index=1,
+):
+ pretrained = nn.Module()
+
+ pretrained.model = model
+
+ used_number_stages = 0 if use_vit_only else number_stages
+ for s in range(used_number_stages):
+ pretrained.model.patch_embed.backbone.stages[s].register_forward_hook(
+ get_activation(str(s + 1))
+ )
+ for s in range(used_number_stages, 4):
+ pretrained.model.blocks[hooks[s]].register_forward_hook(get_activation(str(s + 1)))
+
+ pretrained.activations = activations
+
+ readout_oper = get_readout_oper(vit_features, features, use_readout, start_index)
+
+ for s in range(used_number_stages):
+ value = nn.Sequential(nn.Identity(), nn.Identity(), nn.Identity())
+ exec(f"pretrained.act_postprocess{s + 1}=value")
+ for s in range(used_number_stages, 4):
+ if s < number_stages:
+ final_layer = nn.ConvTranspose2d(
+ in_channels=features[s],
+ out_channels=features[s],
+ kernel_size=4 // (2 ** s),
+ stride=4 // (2 ** s),
+ padding=0,
+ bias=True,
+ dilation=1,
+ groups=1,
+ )
+ elif s > number_stages:
+ final_layer = nn.Conv2d(
+ in_channels=features[3],
+ out_channels=features[3],
+ kernel_size=3,
+ stride=2,
+ padding=1,
+ )
+ else:
+ final_layer = None
+
+ layers = [
+ readout_oper[s],
+ Transpose(1, 2),
+ nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
+ nn.Conv2d(
+ in_channels=vit_features,
+ out_channels=features[s],
+ kernel_size=1,
+ stride=1,
+ padding=0,
+ ),
+ ]
+ if final_layer is not None:
+ layers.append(final_layer)
+
+ value = nn.Sequential(*layers)
+ exec(f"pretrained.act_postprocess{s + 1}=value")
+
+ pretrained.model.start_index = start_index
+ pretrained.model.patch_size = patch_size
+
+ # We inject this function into the VisionTransformer instances so that
+ # we can use it with interpolated position embeddings without modifying the library source.
+ pretrained.model.forward_flex = types.MethodType(forward_flex, pretrained.model)
+
+ # We inject this function into the VisionTransformer instances so that
+ # we can use it with interpolated position embeddings without modifying the library source.
+ pretrained.model._resize_pos_embed = types.MethodType(
+ _resize_pos_embed, pretrained.model
+ )
+
+ return pretrained
+
+
+def _make_pretrained_vitb_rn50_384(
+ pretrained, use_readout="ignore", hooks=None, use_vit_only=False
+):
+ model = timm.create_model("vit_base_resnet50_384", pretrained=pretrained)
+
+ hooks = [0, 1, 8, 11] if hooks == None else hooks
+ return _make_vit_b_rn50_backbone(
+ model,
+ features=[256, 512, 768, 768],
+ size=[384, 384],
+ hooks=hooks,
+ use_vit_only=use_vit_only,
+ use_readout=use_readout,
+ )
diff --git a/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/base_model.py b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/base_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..27fe93ec5bfcfae3f8e78392ca617cdb6adc9b11
--- /dev/null
+++ b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/base_model.py
@@ -0,0 +1,16 @@
+import torch
+
+
class BaseModel(torch.nn.Module):
    def load(self, path):
        """Load network weights from a checkpoint file.

        Args:
            path (str): path to the checkpoint on disk
        """
        checkpoint = torch.load(path, map_location=torch.device('cpu'))

        # Full training checkpoints keep the weights under "model";
        # plain weight files are the state dict itself.
        state_dict = checkpoint["model"] if "optimizer" in checkpoint else checkpoint

        self.load_state_dict(state_dict)
diff --git a/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/blocks.py b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/blocks.py
new file mode 100644
index 0000000000000000000000000000000000000000..7f48f075c3ad3f62d0f303937c410982f1491929
--- /dev/null
+++ b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/blocks.py
@@ -0,0 +1,439 @@
+import torch
+import torch.nn as nn
+
+from .backbones.beit import (
+ _make_pretrained_beitl16_512,
+ _make_pretrained_beitl16_384,
+ _make_pretrained_beitb16_384,
+ forward_beit,
+)
+from .backbones.swin_common import (
+ forward_swin,
+)
+from .backbones.swin2 import (
+ _make_pretrained_swin2l24_384,
+ _make_pretrained_swin2b24_384,
+ _make_pretrained_swin2t16_256,
+)
+from .backbones.swin import (
+ _make_pretrained_swinl12_384,
+)
+from .backbones.levit import (
+ _make_pretrained_levit_384,
+ forward_levit,
+)
+from .backbones.vit import (
+ _make_pretrained_vitb_rn50_384,
+ _make_pretrained_vitl16_384,
+ _make_pretrained_vitb16_384,
+ forward_vit,
+)
+
def _make_encoder(backbone, features, use_pretrained, groups=1, expand=False, exportable=True, hooks=None,
                  use_vit_only=False, use_readout="ignore", in_features=[96, 256, 512, 1024]):
    """Instantiate a MiDaS encoder backbone plus its "scratch" projection convs.

    Args:
        backbone (str): backbone identifier (e.g. "beitl16_512", "vitl16_384",
            "efficientnet_lite3"); selects both the pretrained network and the
            per-stage channel counts handed to _make_scratch.
        features (int): decoder feature width passed to _make_scratch.
        use_pretrained (bool): load pretrained backbone weights.
        groups (int): conv groups for the scratch layers.
        expand (bool): widen scratch outputs per stage (see _make_scratch).
        exportable (bool): export-friendly variant (efficientnet backbone only).
        hooks (list[int], optional): transformer block indices to hook.
        use_vit_only (bool): hybrid ViT backbone: ignore the ResNet activations.
        use_readout (str): readout-token handling for ViT-style backbones.
        in_features (list[int]): per-stage channels, used for next_vit backbones.
            NOTE(review): mutable default list — not mutated here, but confirm
            no caller modifies it in place.

    Returns:
        tuple: (pretrained backbone module, scratch projection module)

    Raises:
        AssertionError: if the backbone identifier is unknown.
    """
    if backbone == "beitl16_512":
        pretrained = _make_pretrained_beitl16_512(
            use_pretrained, hooks=hooks, use_readout=use_readout
        )
        scratch = _make_scratch(
            [256, 512, 1024, 1024], features, groups=groups, expand=expand
        )  # BEiT_512-L (backbone)
    elif backbone == "beitl16_384":
        pretrained = _make_pretrained_beitl16_384(
            use_pretrained, hooks=hooks, use_readout=use_readout
        )
        scratch = _make_scratch(
            [256, 512, 1024, 1024], features, groups=groups, expand=expand
        )  # BEiT_384-L (backbone)
    elif backbone == "beitb16_384":
        pretrained = _make_pretrained_beitb16_384(
            use_pretrained, hooks=hooks, use_readout=use_readout
        )
        scratch = _make_scratch(
            [96, 192, 384, 768], features, groups=groups, expand=expand
        )  # BEiT_384-B (backbone)
    elif backbone == "swin2l24_384":
        pretrained = _make_pretrained_swin2l24_384(
            use_pretrained, hooks=hooks
        )
        scratch = _make_scratch(
            [192, 384, 768, 1536], features, groups=groups, expand=expand
        )  # Swin2-L/12to24 (backbone)
    elif backbone == "swin2b24_384":
        pretrained = _make_pretrained_swin2b24_384(
            use_pretrained, hooks=hooks
        )
        scratch = _make_scratch(
            [128, 256, 512, 1024], features, groups=groups, expand=expand
        )  # Swin2-B/12to24 (backbone)
    elif backbone == "swin2t16_256":
        pretrained = _make_pretrained_swin2t16_256(
            use_pretrained, hooks=hooks
        )
        scratch = _make_scratch(
            [96, 192, 384, 768], features, groups=groups, expand=expand
        )  # Swin2-T/16 (backbone)
    elif backbone == "swinl12_384":
        pretrained = _make_pretrained_swinl12_384(
            use_pretrained, hooks=hooks
        )
        scratch = _make_scratch(
            [192, 384, 768, 1536], features, groups=groups, expand=expand
        )  # Swin-L/12 (backbone)
    elif backbone == "next_vit_large_6m":
        # Imported lazily so the next_vit dependency is only needed when used.
        from .backbones.next_vit import _make_pretrained_next_vit_large_6m
        pretrained = _make_pretrained_next_vit_large_6m(hooks=hooks)
        scratch = _make_scratch(
            in_features, features, groups=groups, expand=expand
        )  # Next-ViT-L on ImageNet-1K-6M (backbone)
    elif backbone == "levit_384":
        pretrained = _make_pretrained_levit_384(
            use_pretrained, hooks=hooks
        )
        # LeViT yields only three stages, hence three channel counts.
        scratch = _make_scratch(
            [384, 512, 768], features, groups=groups, expand=expand
        )  # LeViT 384 (backbone)
    elif backbone == "vitl16_384":
        pretrained = _make_pretrained_vitl16_384(
            use_pretrained, hooks=hooks, use_readout=use_readout
        )
        scratch = _make_scratch(
            [256, 512, 1024, 1024], features, groups=groups, expand=expand
        )  # ViT-L/16 - 85.0% Top1 (backbone)
    elif backbone == "vitb_rn50_384":
        pretrained = _make_pretrained_vitb_rn50_384(
            use_pretrained,
            hooks=hooks,
            use_vit_only=use_vit_only,
            use_readout=use_readout,
        )
        scratch = _make_scratch(
            [256, 512, 768, 768], features, groups=groups, expand=expand
        )  # ViT-H/16 - 85.0% Top1 (backbone)
    elif backbone == "vitb16_384":
        pretrained = _make_pretrained_vitb16_384(
            use_pretrained, hooks=hooks, use_readout=use_readout
        )
        scratch = _make_scratch(
            [96, 192, 384, 768], features, groups=groups, expand=expand
        )  # ViT-B/16 - 84.6% Top1 (backbone)
    elif backbone == "resnext101_wsl":
        pretrained = _make_pretrained_resnext101_wsl(use_pretrained)
        scratch = _make_scratch([256, 512, 1024, 2048], features, groups=groups, expand=expand)  # efficientnet_lite3
    elif backbone == "efficientnet_lite3":
        pretrained = _make_pretrained_efficientnet_lite3(use_pretrained, exportable=exportable)
        scratch = _make_scratch([32, 48, 136, 384], features, groups=groups, expand=expand)  # efficientnet_lite3
    else:
        # Unknown backbone: report and abort (upstream MiDaS convention).
        print(f"Backbone '{backbone}' not implemented")
        assert False

    return pretrained, scratch
+
+
+def _make_scratch(in_shape, out_shape, groups=1, expand=False):
+ scratch = nn.Module()
+
+ out_shape1 = out_shape
+ out_shape2 = out_shape
+ out_shape3 = out_shape
+ if len(in_shape) >= 4:
+ out_shape4 = out_shape
+
+ if expand:
+ out_shape1 = out_shape
+ out_shape2 = out_shape*2
+ out_shape3 = out_shape*4
+ if len(in_shape) >= 4:
+ out_shape4 = out_shape*8
+
+ scratch.layer1_rn = nn.Conv2d(
+ in_shape[0], out_shape1, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
+ )
+ scratch.layer2_rn = nn.Conv2d(
+ in_shape[1], out_shape2, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
+ )
+ scratch.layer3_rn = nn.Conv2d(
+ in_shape[2], out_shape3, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
+ )
+ if len(in_shape) >= 4:
+ scratch.layer4_rn = nn.Conv2d(
+ in_shape[3], out_shape4, kernel_size=3, stride=1, padding=1, bias=False, groups=groups
+ )
+
+ return scratch
+
+
def _make_pretrained_efficientnet_lite3(use_pretrained, exportable=False):
    """Download tf_efficientnet_lite3 via torch.hub and regroup it into the
    four-stage backbone layout MiDaS expects.

    Args:
        use_pretrained (bool): load ImageNet weights.
        exportable (bool): request an export-friendly model variant.

    Returns:
        nn.Module: backbone exposing layer1..layer4.
    """
    effnet = torch.hub.load(
        "rwightman/gen-efficientnet-pytorch",
        "tf_efficientnet_lite3",
        pretrained=use_pretrained,
        exportable=exportable,
    )
    return _make_efficientnet_backbone(effnet)
+
+
+def _make_efficientnet_backbone(effnet):
+ pretrained = nn.Module()
+
+ pretrained.layer1 = nn.Sequential(
+ effnet.conv_stem, effnet.bn1, effnet.act1, *effnet.blocks[0:2]
+ )
+ pretrained.layer2 = nn.Sequential(*effnet.blocks[2:3])
+ pretrained.layer3 = nn.Sequential(*effnet.blocks[3:5])
+ pretrained.layer4 = nn.Sequential(*effnet.blocks[5:9])
+
+ return pretrained
+
+
+def _make_resnet_backbone(resnet):
+ pretrained = nn.Module()
+ pretrained.layer1 = nn.Sequential(
+ resnet.conv1, resnet.bn1, resnet.relu, resnet.maxpool, resnet.layer1
+ )
+
+ pretrained.layer2 = resnet.layer2
+ pretrained.layer3 = resnet.layer3
+ pretrained.layer4 = resnet.layer4
+
+ return pretrained
+
+
def _make_pretrained_resnext101_wsl(use_pretrained):
    """Download ResNeXt-101 32x8d (WSL weights) via torch.hub and wrap it.

    Note: the hub weights are always loaded; ``use_pretrained`` is accepted
    for interface symmetry with the other builders but not consulted here.

    Returns:
        nn.Module: backbone exposing layer1..layer4.
    """
    backbone = torch.hub.load("facebookresearch/WSL-Images", "resnext101_32x8d_wsl")
    return _make_resnet_backbone(backbone)
+
+
+
class Interpolate(nn.Module):
    """Thin nn.Module wrapper around nn.functional.interpolate."""

    def __init__(self, scale_factor, mode, align_corners=False):
        """Init.

        Args:
            scale_factor (float): spatial scaling factor
            mode (str): interpolation mode (e.g. "bilinear")
            align_corners (bool): forwarded to nn.functional.interpolate
        """
        super().__init__()

        self.interp = nn.functional.interpolate
        self.scale_factor = scale_factor
        self.mode = mode
        self.align_corners = align_corners

    def forward(self, x):
        """Resample the input by self.scale_factor.

        Args:
            x (tensor): input

        Returns:
            tensor: interpolated data
        """
        return self.interp(
            x,
            scale_factor=self.scale_factor,
            mode=self.mode,
            align_corners=self.align_corners,
        )
+
+
class ResidualConvUnit(nn.Module):
    """Pre-activation residual block: x + conv(relu(conv(relu(x))))."""

    def __init__(self, features):
        """Init.

        Args:
            features (int): number of input/output channels
        """
        super().__init__()

        self.conv1 = nn.Conv2d(
            features, features, kernel_size=3, stride=1, padding=1, bias=True
        )
        self.conv2 = nn.Conv2d(
            features, features, kernel_size=3, stride=1, padding=1, bias=True
        )
        # In-place ReLU, as in the original implementation.
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        """Forward pass.

        Args:
            x (tensor): input

        Returns:
            tensor: input (post-ReLU) plus the residual branch
        """
        branch = self.conv2(self.relu(self.conv1(self.relu(x))))
        return branch + x
+
+
class FeatureFusionBlock(nn.Module):
    """Fuse an optional skip connection into the decoder path and upsample x2."""

    def __init__(self, features):
        """Init.

        Args:
            features (int): number of channels
        """
        super().__init__()

        self.resConfUnit1 = ResidualConvUnit(features)
        self.resConfUnit2 = ResidualConvUnit(features)

    def forward(self, *xs):
        """Forward pass.

        Args:
            *xs: one tensor (decoder path) or two (decoder path, skip)

        Returns:
            tensor: fused features, bilinearly upsampled by a factor of 2
        """
        output = xs[0]

        # When a skip connection is supplied, refine it and add it in place.
        if len(xs) == 2:
            output += self.resConfUnit1(xs[1])

        output = self.resConfUnit2(output)

        return nn.functional.interpolate(
            output, scale_factor=2, mode="bilinear", align_corners=True
        )
+
+
+
+
class ResidualConvUnit_custom(nn.Module):
    """Residual block with configurable activation and optional batch norm,
    using a quantization-friendly add for the skip connection."""

    def __init__(self, features, activation, bn):
        """Init.

        Args:
            features (int): number of channels
            activation: activation module applied before each convolution
            bn (bool): insert BatchNorm2d after each convolution
        """
        super().__init__()

        self.bn = bn
        self.groups = 1

        conv_kwargs = dict(
            kernel_size=3, stride=1, padding=1, bias=True, groups=self.groups
        )
        self.conv1 = nn.Conv2d(features, features, **conv_kwargs)
        self.conv2 = nn.Conv2d(features, features, **conv_kwargs)

        if self.bn == True:
            self.bn1 = nn.BatchNorm2d(features)
            self.bn2 = nn.BatchNorm2d(features)

        self.activation = activation

        # FloatFunctional so the residual add can be observed/quantized.
        self.skip_add = nn.quantized.FloatFunctional()

    def forward(self, x):
        """Forward pass.

        Args:
            x (tensor): input

        Returns:
            tensor: input plus the residual branch
        """
        out = self.conv1(self.activation(x))
        if self.bn == True:
            out = self.bn1(out)

        out = self.conv2(self.activation(out))
        if self.bn == True:
            out = self.bn2(out)

        # self.groups is fixed to 1 above, so this merge never runs; kept for
        # parity with the original implementation.
        if self.groups > 1:
            out = self.conv_merge(out)

        return self.skip_add.add(out, x)
+
class FeatureFusionBlock_custom(nn.Module):
    """Fusion block with configurable activation/bn, quantization-friendly
    skip addition and a 1x1 output projection."""

    def __init__(self, features, activation, deconv=False, bn=False, expand=False, align_corners=True, size=None):
        """Init.

        Args:
            features (int): number of input channels
            activation: activation module for the residual units
            deconv (bool): stored flag; not consulted in forward
            bn (bool): use batch norm inside the residual units
            expand (bool): halve the channel count in the output projection
            align_corners (bool): forwarded to interpolate
            size: fixed output size used when forward() receives no size
        """
        super().__init__()

        self.deconv = deconv
        self.align_corners = align_corners
        self.groups = 1
        self.expand = expand
        self.size = size

        # The output projection optionally halves the channel count.
        out_features = features // 2 if self.expand == True else features
        self.out_conv = nn.Conv2d(
            features, out_features, kernel_size=1, stride=1, padding=0, bias=True, groups=1
        )

        self.resConfUnit1 = ResidualConvUnit_custom(features, activation, bn)
        self.resConfUnit2 = ResidualConvUnit_custom(features, activation, bn)

        self.skip_add = nn.quantized.FloatFunctional()

    def forward(self, *xs, size=None):
        """Forward pass.

        Args:
            *xs: one tensor (decoder path) or two (decoder path, skip)
            size: explicit output size; takes precedence over self.size

        Returns:
            tensor: fused, resized and projected features
        """
        output = xs[0]

        if len(xs) == 2:
            res = self.resConfUnit1(xs[1])
            output = self.skip_add.add(output, res)

        output = self.resConfUnit2(output)

        # Resize precedence: explicit arg > construction-time size > x2 upscale.
        if size is not None:
            modifier = {"size": size}
        elif self.size is not None:
            modifier = {"size": self.size}
        else:
            modifier = {"scale_factor": 2}

        output = nn.functional.interpolate(
            output, **modifier, mode="bilinear", align_corners=self.align_corners
        )

        return self.out_conv(output)
+
diff --git a/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/dpt_depth.py b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/dpt_depth.py
new file mode 100644
index 0000000000000000000000000000000000000000..0382e7303b3920db8c28bf84e0e289862fcb8f26
--- /dev/null
+++ b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/dpt_depth.py
@@ -0,0 +1,166 @@
+import torch
+import torch.nn as nn
+
+from .base_model import BaseModel
+from .blocks import (
+ FeatureFusionBlock_custom,
+ Interpolate,
+ _make_encoder,
+ forward_beit,
+ forward_swin,
+ forward_levit,
+ forward_vit,
+)
+from .backbones.levit import stem_b4_transpose
+from timm.models.layers import get_act_layer
+
+
def _make_fusion_block(features, use_bn, size=None):
    """Build a FeatureFusionBlock_custom with the DPT default settings.

    Args:
        features (int): number of channels
        use_bn (bool): enable batch norm in the residual units
        size: optional fixed output size for the block

    Returns:
        FeatureFusionBlock_custom
    """
    return FeatureFusionBlock_custom(
        features,
        nn.ReLU(False),
        deconv=False,
        bn=use_bn,
        expand=False,
        align_corners=True,
        size=size,
    )
+
+
class DPT(BaseModel):
    """Dense Prediction Transformer: transformer/conv encoder + RefineNet-style
    decoder, with a task-specific head supplied by the caller."""

    def __init__(
        self,
        head,
        features=256,
        backbone="vitb_rn50_384",
        readout="project",
        channels_last=False,
        use_bn=False,
        **kwargs
    ):
        """Init.

        Args:
            head (nn.Module): output head appended after the decoder.
            features (int): decoder feature width.
            backbone (str): encoder identifier (see hooks table below).
            readout (str): readout-token handling for ViT-style encoders.
            channels_last (bool): use channels-last memory format in forward.
            use_bn (bool): batch norm inside the fusion blocks.
        """

        super(DPT, self).__init__()

        self.channels_last = channels_last

        # For the Swin, Swin 2, LeViT and Next-ViT Transformers, the hierarchical architectures prevent setting the
        # hooks freely. Instead, the hooks have to be chosen according to the ranges specified in the comments.
        hooks = {
            "beitl16_512": [5, 11, 17, 23],
            "beitl16_384": [5, 11, 17, 23],
            "beitb16_384": [2, 5, 8, 11],
            "swin2l24_384": [1, 1, 17, 1],  # Allowed ranges: [0, 1], [0, 1], [ 0, 17], [ 0, 1]
            "swin2b24_384": [1, 1, 17, 1],  # [0, 1], [0, 1], [ 0, 17], [ 0, 1]
            "swin2t16_256": [1, 1, 5, 1],  # [0, 1], [0, 1], [ 0, 5], [ 0, 1]
            "swinl12_384": [1, 1, 17, 1],  # [0, 1], [0, 1], [ 0, 17], [ 0, 1]
            "next_vit_large_6m": [2, 6, 36, 39],  # [0, 2], [3, 6], [ 7, 36], [37, 39]
            "levit_384": [3, 11, 21],  # [0, 3], [6, 11], [14, 21]
            "vitb_rn50_384": [0, 1, 8, 11],
            "vitb16_384": [2, 5, 8, 11],
            "vitl16_384": [5, 11, 17, 23],
        }[backbone]

        # Next-ViT needs explicit per-stage channel counts for _make_encoder.
        if "next_vit" in backbone:
            in_features = {
                "next_vit_large_6m": [96, 256, 512, 1024],
            }[backbone]
        else:
            in_features = None

        # Instantiate backbone and reassemble blocks.
        self.pretrained, self.scratch = _make_encoder(
            backbone,
            features,
            False,  # use_pretrained=False: weights come from a checkpoint, not ImageNet
            groups=1,
            expand=False,
            exportable=False,
            hooks=hooks,
            use_readout=readout,
            in_features=in_features,
        )

        # LeViT exposes only 3 stages; everything else uses 4.
        self.number_layers = len(hooks) if hooks is not None else 4
        size_refinenet3 = None
        self.scratch.stem_transpose = None

        # Pick the encoder-specific forward function.
        if "beit" in backbone:
            self.forward_transformer = forward_beit
        elif "swin" in backbone:
            self.forward_transformer = forward_swin
        elif "next_vit" in backbone:
            from .backbones.next_vit import forward_next_vit
            self.forward_transformer = forward_next_vit
        elif "levit" in backbone:
            self.forward_transformer = forward_levit
            # LeViT: fixed 7x7 refinenet3 output plus a transposed-conv stem.
            size_refinenet3 = 7
            self.scratch.stem_transpose = stem_b4_transpose(256, 128, get_act_layer("hard_swish"))
        else:
            self.forward_transformer = forward_vit

        self.scratch.refinenet1 = _make_fusion_block(features, use_bn)
        self.scratch.refinenet2 = _make_fusion_block(features, use_bn)
        self.scratch.refinenet3 = _make_fusion_block(features, use_bn, size_refinenet3)
        if self.number_layers >= 4:
            self.scratch.refinenet4 = _make_fusion_block(features, use_bn)

        self.scratch.output_conv = head


    def forward(self, x):
        """Run the encoder, fuse the hooked stages top-down, apply the head.

        Args:
            x (tensor): input image batch

        Returns:
            tensor: head output
        """
        if self.channels_last == True:
            # NOTE(review): the result of contiguous() is discarded here, so
            # this line has no effect on x — confirm intent upstream.
            x.contiguous(memory_format=torch.channels_last)

        layers = self.forward_transformer(self.pretrained, x)
        if self.number_layers == 3:
            layer_1, layer_2, layer_3 = layers
        else:
            layer_1, layer_2, layer_3, layer_4 = layers

        # Project each stage to the decoder width.
        layer_1_rn = self.scratch.layer1_rn(layer_1)
        layer_2_rn = self.scratch.layer2_rn(layer_2)
        layer_3_rn = self.scratch.layer3_rn(layer_3)
        if self.number_layers >= 4:
            layer_4_rn = self.scratch.layer4_rn(layer_4)

        # Top-down refinement; each step is resized to match the next skip.
        if self.number_layers == 3:
            path_3 = self.scratch.refinenet3(layer_3_rn, size=layer_2_rn.shape[2:])
        else:
            path_4 = self.scratch.refinenet4(layer_4_rn, size=layer_3_rn.shape[2:])
            path_3 = self.scratch.refinenet3(path_4, layer_3_rn, size=layer_2_rn.shape[2:])
        path_2 = self.scratch.refinenet2(path_3, layer_2_rn, size=layer_1_rn.shape[2:])
        path_1 = self.scratch.refinenet1(path_2, layer_1_rn)

        if self.scratch.stem_transpose is not None:
            path_1 = self.scratch.stem_transpose(path_1)

        out = self.scratch.output_conv(path_1)

        return out
+
+
class DPTDepthModel(DPT):
    """DPT variant with a monocular-depth regression head."""

    def __init__(self, path=None, non_negative=True, **kwargs):
        """Init.

        Args:
            path (str, optional): checkpoint to load after construction
            non_negative (bool): clamp predictions to >= 0 via a final ReLU
            **kwargs: forwarded to DPT (may carry features/head_features_*)
        """
        features = kwargs.get("features", 256)
        head_features_1 = kwargs.get("head_features_1", features)
        head_features_2 = kwargs.get("head_features_2", 32)
        # head_features_* are consumed here and must not reach DPT.__init__.
        kwargs.pop("head_features_1", None)
        kwargs.pop("head_features_2", None)

        # Regression head: halve channels, upsample x2, project to 1 channel.
        head = nn.Sequential(
            nn.Conv2d(head_features_1, head_features_1 // 2, kernel_size=3, stride=1, padding=1),
            Interpolate(scale_factor=2, mode="bilinear", align_corners=True),
            nn.Conv2d(head_features_1 // 2, head_features_2, kernel_size=3, stride=1, padding=1),
            nn.ReLU(True),
            nn.Conv2d(head_features_2, 1, kernel_size=1, stride=1, padding=0),
            nn.ReLU(True) if non_negative else nn.Identity(),
            nn.Identity(),
        )

        super().__init__(head, **kwargs)

        if path is not None:
            self.load(path)

    def forward(self, x):
        """Predict depth for x and drop the singleton channel dimension."""
        return super().forward(x).squeeze(dim=1)
diff --git a/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/midas_net.py b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/midas_net.py
new file mode 100644
index 0000000000000000000000000000000000000000..e10487c8fd4d3b50a4d9cf9bb72ce0a4734894c2
--- /dev/null
+++ b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/midas_net.py
@@ -0,0 +1,76 @@
+"""MidashNet: Network for monocular depth estimation trained by mixing several datasets.
+This file contains code that is adapted from
+https://github.com/thomasjpfan/pytorch_refinenet/blob/master/pytorch_refinenet/refinenet/refinenet_4cascade.py
+"""
+import torch
+import torch.nn as nn
+
+from .base_model import BaseModel
+from .blocks import FeatureFusionBlock, Interpolate, _make_encoder
+
+
class MidasNet(BaseModel):
    """Network for monocular depth estimation (ResNeXt-101 WSL encoder)."""

    def __init__(self, path=None, features=256, non_negative=True):
        """Init.

        Args:
            path (str, optional): Path to saved model. Defaults to None.
            features (int, optional): Number of features. Defaults to 256.
            non_negative (bool, optional): clamp the output to >= 0.
        """
        print("Loading weights: ", path)

        super().__init__()

        # NOTE(review): pretrained encoder weights are requested only when a
        # checkpoint path IS given (the checkpoint then overwrites them) —
        # the opposite convention of MidasNet_small; kept as-is.
        use_pretrained = path is not None

        self.pretrained, self.scratch = _make_encoder(
            backbone="resnext101_wsl", features=features, use_pretrained=use_pretrained
        )

        self.scratch.refinenet4 = FeatureFusionBlock(features)
        self.scratch.refinenet3 = FeatureFusionBlock(features)
        self.scratch.refinenet2 = FeatureFusionBlock(features)
        self.scratch.refinenet1 = FeatureFusionBlock(features)

        # Output head: halve resolution gap with one upsample, project to 1ch.
        self.scratch.output_conv = nn.Sequential(
            nn.Conv2d(features, 128, kernel_size=3, stride=1, padding=1),
            Interpolate(scale_factor=2, mode="bilinear"),
            nn.Conv2d(128, 32, kernel_size=3, stride=1, padding=1),
            nn.ReLU(True),
            nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0),
            nn.ReLU(True) if non_negative else nn.Identity(),
        )

        if path:
            self.load(path)

    def forward(self, x):
        """Forward pass.

        Args:
            x (tensor): input data (image)

        Returns:
            tensor: depth, shape (N, H, W)
        """
        # Encoder stages.
        layer_1 = self.pretrained.layer1(x)
        layer_2 = self.pretrained.layer2(layer_1)
        layer_3 = self.pretrained.layer3(layer_2)
        layer_4 = self.pretrained.layer4(layer_3)

        # Project every stage to the decoder width.
        layer_1_rn = self.scratch.layer1_rn(layer_1)
        layer_2_rn = self.scratch.layer2_rn(layer_2)
        layer_3_rn = self.scratch.layer3_rn(layer_3)
        layer_4_rn = self.scratch.layer4_rn(layer_4)

        # Top-down decoding, fusing one skip connection per refinement step.
        fused = self.scratch.refinenet4(layer_4_rn)
        fused = self.scratch.refinenet3(fused, layer_3_rn)
        fused = self.scratch.refinenet2(fused, layer_2_rn)
        fused = self.scratch.refinenet1(fused, layer_1_rn)

        out = self.scratch.output_conv(fused)

        return torch.squeeze(out, dim=1)
diff --git a/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/midas_net_custom.py b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/midas_net_custom.py
new file mode 100644
index 0000000000000000000000000000000000000000..3a8df446c55f057deb6db828db276ae9c0e2308d
--- /dev/null
+++ b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/midas_net_custom.py
@@ -0,0 +1,128 @@
+"""MidashNet: Network for monocular depth estimation trained by mixing several datasets.
+This file contains code that is adapted from
+https://github.com/thomasjpfan/pytorch_refinenet/blob/master/pytorch_refinenet/refinenet/refinenet_4cascade.py
+"""
+import torch
+import torch.nn as nn
+
+from .base_model import BaseModel
+from .blocks import FeatureFusionBlock, FeatureFusionBlock_custom, Interpolate, _make_encoder
+
+
class MidasNet_small(BaseModel):
    """Lightweight monocular depth network (EfficientNet-Lite3 encoder)."""

    def __init__(self, path=None, features=64, backbone="efficientnet_lite3", non_negative=True, exportable=True, channels_last=False, align_corners=True,
        blocks={'expand': True}):
        """Init.

        Args:
            path (str, optional): Path to saved model. Defaults to None.
            features (int, optional): Number of features. Defaults to 64.
            backbone (str, optional): Backbone network for encoder.
            non_negative (bool, optional): clamp the output to >= 0.
            exportable (bool, optional): build an export-friendly encoder.
            channels_last (bool, optional): use channels-last memory format.
            align_corners (bool, optional): forwarded to the fusion blocks.
            blocks (dict, optional): block options; 'expand' widens the
                decoder per stage. NOTE(review): mutable default dict, kept
                for interface compatibility — it is only read, never mutated.
        """
        print("Loading weights: ", path)

        super().__init__()

        # NOTE(review): pretrained encoder weights are used only when NO
        # checkpoint path is given — the opposite convention of MidasNet;
        # kept as in the original implementation.
        use_pretrained = not path

        self.channels_last = channels_last
        self.blocks = blocks
        self.backbone = backbone

        self.groups = 1

        # Decoder widths per stage: constant, or doubling when expanding.
        self.expand = self.blocks.get('expand') == True
        if self.expand:
            features1, features2, features3, features4 = (
                features, features * 2, features * 4, features * 8
            )
        else:
            features1 = features2 = features3 = features4 = features

        self.pretrained, self.scratch = _make_encoder(
            self.backbone, features, use_pretrained,
            groups=self.groups, expand=self.expand, exportable=exportable,
        )

        self.scratch.activation = nn.ReLU(False)

        self.scratch.refinenet4 = FeatureFusionBlock_custom(features4, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners)
        self.scratch.refinenet3 = FeatureFusionBlock_custom(features3, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners)
        self.scratch.refinenet2 = FeatureFusionBlock_custom(features2, self.scratch.activation, deconv=False, bn=False, expand=self.expand, align_corners=align_corners)
        # The last refinement stage never expands.
        self.scratch.refinenet1 = FeatureFusionBlock_custom(features1, self.scratch.activation, deconv=False, bn=False, align_corners=align_corners)

        self.scratch.output_conv = nn.Sequential(
            nn.Conv2d(features, features // 2, kernel_size=3, stride=1, padding=1, groups=self.groups),
            Interpolate(scale_factor=2, mode="bilinear"),
            nn.Conv2d(features // 2, 32, kernel_size=3, stride=1, padding=1),
            self.scratch.activation,
            nn.Conv2d(32, 1, kernel_size=1, stride=1, padding=0),
            nn.ReLU(True) if non_negative else nn.Identity(),
            nn.Identity(),
        )

        if path:
            self.load(path)

    def forward(self, x):
        """Forward pass.

        Args:
            x (tensor): input data (image)

        Returns:
            tensor: depth, shape (N, H, W)
        """
        if self.channels_last == True:
            print("self.channels_last = ", self.channels_last)
            # NOTE(review): the result of contiguous() is discarded here, so
            # this line has no effect on x — confirm intent upstream.
            x.contiguous(memory_format=torch.channels_last)

        # Encoder stages.
        layer_1 = self.pretrained.layer1(x)
        layer_2 = self.pretrained.layer2(layer_1)
        layer_3 = self.pretrained.layer3(layer_2)
        layer_4 = self.pretrained.layer4(layer_3)

        # Project every stage to the decoder width.
        layer_1_rn = self.scratch.layer1_rn(layer_1)
        layer_2_rn = self.scratch.layer2_rn(layer_2)
        layer_3_rn = self.scratch.layer3_rn(layer_3)
        layer_4_rn = self.scratch.layer4_rn(layer_4)

        # Top-down decoding with one skip connection per refinement step.
        fused = self.scratch.refinenet4(layer_4_rn)
        fused = self.scratch.refinenet3(fused, layer_3_rn)
        fused = self.scratch.refinenet2(fused, layer_2_rn)
        fused = self.scratch.refinenet1(fused, layer_1_rn)

        out = self.scratch.output_conv(fused)

        return torch.squeeze(out, dim=1)
+
+
+
def fuse_model(m):
    """Fuse Conv2d+BatchNorm2d(+ReLU) sequences in place for quantization.

    Walks the module tree in registration order and calls
    torch.quantization.fuse_modules on every Conv->BN->ReLU triple (or
    Conv->BN pair) it encounters.

    Args:
        m (nn.Module): model to fuse in place (must be in eval mode for
            Conv/BN fusion).
    """
    # Sliding window holding the (type, qualified name) of the last two
    # modules seen; seeded with placeholders that match nothing.
    window = [(nn.Identity(), ''), (nn.Identity(), '')]
    for name, module in m.named_modules():
        (two_back_type, two_back_name), (prev_type, prev_name) = window

        if two_back_type == nn.Conv2d and prev_type == nn.BatchNorm2d and type(module) == nn.ReLU:
            # Conv -> BN -> ReLU triple.
            torch.quantization.fuse_modules(m, [two_back_name, prev_name, name], inplace=True)
        elif two_back_type == nn.Conv2d and prev_type == nn.BatchNorm2d:
            # Conv -> BN pair (no trailing ReLU).
            torch.quantization.fuse_modules(m, [two_back_name, prev_name], inplace=True)

        window = [window[1], (type(module), name)]
\ No newline at end of file
diff --git a/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/model_loader.py b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/model_loader.py
new file mode 100644
index 0000000000000000000000000000000000000000..6c6db0d7d421cebba21da140eeb6eb09b8906196
--- /dev/null
+++ b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/model_loader.py
@@ -0,0 +1,242 @@
+import cv2
+import torch
+
+from midas.dpt_depth import DPTDepthModel
+from midas.midas_net import MidasNet
+from midas.midas_net_custom import MidasNet_small
+from midas.transforms import Resize, NormalizeImage, PrepareForNet
+
+from torchvision.transforms import Compose
+
# Mapping from MiDaS model_type identifiers to the default on-disk location
# of the corresponding weight file (paths are relative to the repo root).
default_models = {
    "dpt_beit_large_512": "weights/dpt_beit_large_512.pt",
    "dpt_beit_large_384": "weights/dpt_beit_large_384.pt",
    "dpt_beit_base_384": "weights/dpt_beit_base_384.pt",
    "dpt_swin2_large_384": "weights/dpt_swin2_large_384.pt",
    "dpt_swin2_base_384": "weights/dpt_swin2_base_384.pt",
    "dpt_swin2_tiny_256": "weights/dpt_swin2_tiny_256.pt",
    "dpt_swin_large_384": "weights/dpt_swin_large_384.pt",
    "dpt_next_vit_large_384": "weights/dpt_next_vit_large_384.pt",
    "dpt_levit_224": "weights/dpt_levit_224.pt",
    "dpt_large_384": "weights/dpt_large_384.pt",
    "dpt_hybrid_384": "weights/dpt_hybrid_384.pt",
    "midas_v21_384": "weights/midas_v21_384.pt",
    "midas_v21_small_256": "weights/midas_v21_small_256.pt",
    "openvino_midas_v21_small_256": "weights/openvino_midas_v21_small_256.xml",
}
+
+
def load_model(device, model_path, model_type="dpt_large_384", optimize=True, height=None, square=False):
    """Load the specified network.

    Args:
        device (device): the torch device used
        model_path (str): path to saved model
        model_type (str): the type of the model to be loaded
        optimize (bool): optimize the model to half-integer on CUDA?
        height (int): inference encoder image height
        square (bool): resize to a square resolution?

    Returns:
        The loaded network, the transform which prepares images as input to the
        network and the dimensions of the network input

    Raises:
        AssertionError: if model_type is unknown.
    """
    # OpenVINO runtime is only needed for the openvino_* model types.
    if "openvino" in model_type:
        from openvino.runtime import Core

    # Default: preserve aspect ratio unless a square resize was requested;
    # several backbones below force keep_aspect_ratio = False.
    keep_aspect_ratio = not square

    if model_type == "dpt_beit_large_512":
        model = DPTDepthModel(
            path=model_path,
            backbone="beitl16_512",
            non_negative=True,
        )
        net_w, net_h = 512, 512
        resize_mode = "minimal"
        normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])

    elif model_type == "dpt_beit_large_384":
        model = DPTDepthModel(
            path=model_path,
            backbone="beitl16_384",
            non_negative=True,
        )
        net_w, net_h = 384, 384
        resize_mode = "minimal"
        normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])

    elif model_type == "dpt_beit_base_384":
        model = DPTDepthModel(
            path=model_path,
            backbone="beitb16_384",
            non_negative=True,
        )
        net_w, net_h = 384, 384
        resize_mode = "minimal"
        normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])

    elif model_type == "dpt_swin2_large_384":
        model = DPTDepthModel(
            path=model_path,
            backbone="swin2l24_384",
            non_negative=True,
        )
        net_w, net_h = 384, 384
        # Swin backbones require a fixed square input.
        keep_aspect_ratio = False
        resize_mode = "minimal"
        normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])

    elif model_type == "dpt_swin2_base_384":
        model = DPTDepthModel(
            path=model_path,
            backbone="swin2b24_384",
            non_negative=True,
        )
        net_w, net_h = 384, 384
        keep_aspect_ratio = False
        resize_mode = "minimal"
        normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])

    elif model_type == "dpt_swin2_tiny_256":
        model = DPTDepthModel(
            path=model_path,
            backbone="swin2t16_256",
            non_negative=True,
        )
        net_w, net_h = 256, 256
        keep_aspect_ratio = False
        resize_mode = "minimal"
        normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])

    elif model_type == "dpt_swin_large_384":
        model = DPTDepthModel(
            path=model_path,
            backbone="swinl12_384",
            non_negative=True,
        )
        net_w, net_h = 384, 384
        keep_aspect_ratio = False
        resize_mode = "minimal"
        normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])

    elif model_type == "dpt_next_vit_large_384":
        model = DPTDepthModel(
            path=model_path,
            backbone="next_vit_large_6m",
            non_negative=True,
        )
        net_w, net_h = 384, 384
        resize_mode = "minimal"
        normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])

    # We change the notation from dpt_levit_224 (MiDaS notation) to levit_384 (timm notation) here, where the 224 refers
    # to the resolution 224x224 used by LeViT and 384 is the first entry of the embed_dim, see _cfg and model_cfgs of
    # https://github.com/rwightman/pytorch-image-models/blob/main/timm/models/levit.py
    # (commit id: 927f031293a30afb940fff0bee34b85d9c059b0e)
    elif model_type == "dpt_levit_224":
        model = DPTDepthModel(
            path=model_path,
            backbone="levit_384",
            non_negative=True,
            head_features_1=64,
            head_features_2=8,
        )
        net_w, net_h = 224, 224
        keep_aspect_ratio = False
        resize_mode = "minimal"
        normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])

    elif model_type == "dpt_large_384":
        model = DPTDepthModel(
            path=model_path,
            backbone="vitl16_384",
            non_negative=True,
        )
        net_w, net_h = 384, 384
        resize_mode = "minimal"
        normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])

    elif model_type == "dpt_hybrid_384":
        model = DPTDepthModel(
            path=model_path,
            backbone="vitb_rn50_384",
            non_negative=True,
        )
        net_w, net_h = 384, 384
        resize_mode = "minimal"
        normalization = NormalizeImage(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])

    elif model_type == "midas_v21_384":
        model = MidasNet(model_path, non_negative=True)
        net_w, net_h = 384, 384
        resize_mode = "upper_bound"
        # v21 models use ImageNet normalization, unlike the DPT family.
        normalization = NormalizeImage(
            mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
        )

    elif model_type == "midas_v21_small_256":
        model = MidasNet_small(model_path, features=64, backbone="efficientnet_lite3", exportable=True,
                               non_negative=True, blocks={'expand': True})
        net_w, net_h = 256, 256
        resize_mode = "upper_bound"
        normalization = NormalizeImage(
            mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
        )

    elif model_type == "openvino_midas_v21_small_256":
        # OpenVINO path: compile the IR model for CPU execution.
        ie = Core()
        uncompiled_model = ie.read_model(model=model_path)
        model = ie.compile_model(uncompiled_model, "CPU")
        net_w, net_h = 256, 256
        resize_mode = "upper_bound"
        normalization = NormalizeImage(
            mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
        )

    else:
        # Unknown model_type: report and abort (upstream MiDaS convention).
        print(f"model_type '{model_type}' not implemented, use: --model_type large")
        assert False

    if not "openvino" in model_type:
        print("Model loaded, number of parameters = {:.0f}M".format(sum(p.numel() for p in model.parameters()) / 1e6))
    else:
        print("Model loaded, optimized with OpenVINO")

    if "openvino" in model_type:
        keep_aspect_ratio = False

    # An explicit height overrides the backbone's native input size.
    if height is not None:
        net_w, net_h = height, height

    transform = Compose(
        [
            Resize(
                net_w,
                net_h,
                resize_target=None,
                keep_aspect_ratio=keep_aspect_ratio,
                ensure_multiple_of=32,
                resize_method=resize_mode,
                image_interpolation_method=cv2.INTER_CUBIC,
            ),
            normalization,
            PrepareForNet(),
        ]
    )

    if not "openvino" in model_type:
        model.eval()

    # Half-precision + channels-last optimization is CUDA-only and does not
    # apply to precompiled OpenVINO models.
    if optimize and (device == torch.device("cuda")):
        if not "openvino" in model_type:
            model = model.to(memory_format=torch.channels_last)
            model = model.half()
        else:
            print("Error: OpenVINO models are already optimized. No optimization to half-float possible.")
            exit()

    if not "openvino" in model_type:
        model.to(device)

    return model, transform, net_w, net_h
diff --git a/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/transforms.py b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/transforms.py
new file mode 100644
index 0000000000000000000000000000000000000000..37d68afcebf67defadd7dfb2ff5494f257876575
--- /dev/null
+++ b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/transforms.py
@@ -0,0 +1,234 @@
+import numpy as np
+import cv2
+import math
+
+
+def apply_min_size(sample, size, image_interpolation_method=cv2.INTER_AREA):
+ """Resize the sample to ensure the given size. Keeps aspect ratio.
+
+ Args:
+ sample (dict): sample
+ size (tuple): image size
+
+ Returns:
+ tuple: new size
+ """
+ shape = list(sample["disparity"].shape)
+
+ if shape[0] >= size[0] and shape[1] >= size[1]:
+ return sample
+
+ scale = [0, 0]
+ scale[0] = size[0] / shape[0]
+ scale[1] = size[1] / shape[1]
+
+ scale = max(scale)
+
+ shape[0] = math.ceil(scale * shape[0])
+ shape[1] = math.ceil(scale * shape[1])
+
+ # resize
+ sample["image"] = cv2.resize(
+ sample["image"], tuple(shape[::-1]), interpolation=image_interpolation_method
+ )
+
+ sample["disparity"] = cv2.resize(
+ sample["disparity"], tuple(shape[::-1]), interpolation=cv2.INTER_NEAREST
+ )
+ sample["mask"] = cv2.resize(
+ sample["mask"].astype(np.float32),
+ tuple(shape[::-1]),
+ interpolation=cv2.INTER_NEAREST,
+ )
+ sample["mask"] = sample["mask"].astype(bool)
+
+ return tuple(shape)
+
+
+class Resize(object):
+ """Resize sample to given size (width, height).
+ """
+
+ def __init__(
+ self,
+ width,
+ height,
+ resize_target=True,
+ keep_aspect_ratio=False,
+ ensure_multiple_of=1,
+ resize_method="lower_bound",
+ image_interpolation_method=cv2.INTER_AREA,
+ ):
+ """Init.
+
+ Args:
+ width (int): desired output width
+ height (int): desired output height
+ resize_target (bool, optional):
+ True: Resize the full sample (image, mask, target).
+ False: Resize image only.
+ Defaults to True.
+ keep_aspect_ratio (bool, optional):
+ True: Keep the aspect ratio of the input sample.
+ Output sample might not have the given width and height, and
+ resize behaviour depends on the parameter 'resize_method'.
+ Defaults to False.
+ ensure_multiple_of (int, optional):
+ Output width and height is constrained to be multiple of this parameter.
+ Defaults to 1.
+ resize_method (str, optional):
+ "lower_bound": Output will be at least as large as the given size.
+ "upper_bound": Output will be at max as large as the given size. (Output size might be smaller than given size.)
+ "minimal": Scale as little as possible. (Output size might be smaller than given size.)
+ Defaults to "lower_bound".
+ """
+ self.__width = width
+ self.__height = height
+
+ self.__resize_target = resize_target
+ self.__keep_aspect_ratio = keep_aspect_ratio
+ self.__multiple_of = ensure_multiple_of
+ self.__resize_method = resize_method
+ self.__image_interpolation_method = image_interpolation_method
+
+ def constrain_to_multiple_of(self, x, min_val=0, max_val=None):
+ y = (np.round(x / self.__multiple_of) * self.__multiple_of).astype(int)
+
+ if max_val is not None and y > max_val:
+ y = (np.floor(x / self.__multiple_of) * self.__multiple_of).astype(int)
+
+ if y < min_val:
+ y = (np.ceil(x / self.__multiple_of) * self.__multiple_of).astype(int)
+
+ return y
+
+ def get_size(self, width, height):
+ # determine new height and width
+ scale_height = self.__height / height
+ scale_width = self.__width / width
+
+ if self.__keep_aspect_ratio:
+ if self.__resize_method == "lower_bound":
+ # scale such that output size is lower bound
+ if scale_width > scale_height:
+ # fit width
+ scale_height = scale_width
+ else:
+ # fit height
+ scale_width = scale_height
+ elif self.__resize_method == "upper_bound":
+ # scale such that output size is upper bound
+ if scale_width < scale_height:
+ # fit width
+ scale_height = scale_width
+ else:
+ # fit height
+ scale_width = scale_height
+ elif self.__resize_method == "minimal":
+ # scale as little as possible
+ if abs(1 - scale_width) < abs(1 - scale_height):
+ # fit width
+ scale_height = scale_width
+ else:
+ # fit height
+ scale_width = scale_height
+ else:
+ raise ValueError(
+ f"resize_method {self.__resize_method} not implemented"
+ )
+
+ if self.__resize_method == "lower_bound":
+ new_height = self.constrain_to_multiple_of(
+ scale_height * height, min_val=self.__height
+ )
+ new_width = self.constrain_to_multiple_of(
+ scale_width * width, min_val=self.__width
+ )
+ elif self.__resize_method == "upper_bound":
+ new_height = self.constrain_to_multiple_of(
+ scale_height * height, max_val=self.__height
+ )
+ new_width = self.constrain_to_multiple_of(
+ scale_width * width, max_val=self.__width
+ )
+ elif self.__resize_method == "minimal":
+ new_height = self.constrain_to_multiple_of(scale_height * height)
+ new_width = self.constrain_to_multiple_of(scale_width * width)
+ else:
+ raise ValueError(f"resize_method {self.__resize_method} not implemented")
+
+ return (new_width, new_height)
+
+ def __call__(self, sample):
+ width, height = self.get_size(
+ sample["image"].shape[1], sample["image"].shape[0]
+ )
+
+ # resize sample
+ sample["image"] = cv2.resize(
+ sample["image"],
+ (width, height),
+ interpolation=self.__image_interpolation_method,
+ )
+
+ if self.__resize_target:
+ if "disparity" in sample:
+ sample["disparity"] = cv2.resize(
+ sample["disparity"],
+ (width, height),
+ interpolation=cv2.INTER_NEAREST,
+ )
+
+ if "depth" in sample:
+ sample["depth"] = cv2.resize(
+ sample["depth"], (width, height), interpolation=cv2.INTER_NEAREST
+ )
+
+ sample["mask"] = cv2.resize(
+ sample["mask"].astype(np.float32),
+ (width, height),
+ interpolation=cv2.INTER_NEAREST,
+ )
+ sample["mask"] = sample["mask"].astype(bool)
+
+ return sample
+
+
+class NormalizeImage(object):
+ """Normalize image by given mean and std.
+ """
+
+ def __init__(self, mean, std):
+ self.__mean = mean
+ self.__std = std
+
+ def __call__(self, sample):
+ sample["image"] = (sample["image"] - self.__mean) / self.__std
+
+ return sample
+
+
+class PrepareForNet(object):
+ """Prepare sample for usage as network input.
+ """
+
+ def __init__(self):
+ pass
+
+ def __call__(self, sample):
+ image = np.transpose(sample["image"], (2, 0, 1))
+ sample["image"] = np.ascontiguousarray(image).astype(np.float32)
+
+ if "mask" in sample:
+ sample["mask"] = sample["mask"].astype(np.float32)
+ sample["mask"] = np.ascontiguousarray(sample["mask"])
+
+ if "disparity" in sample:
+ disparity = sample["disparity"].astype(np.float32)
+ sample["disparity"] = np.ascontiguousarray(disparity)
+
+ if "depth" in sample:
+ depth = sample["depth"].astype(np.float32)
+ sample["depth"] = np.ascontiguousarray(depth)
+
+ return sample
diff --git a/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/LICENSE b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/LICENSE
new file mode 100644
index 0000000000000000000000000000000000000000..00ac5912e5fb9d000475d6dd6a0eec15f98b3b70
--- /dev/null
+++ b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2020 Alexey
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/README.md b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..ed0c7e8465d69c21928e86996b5222b2d3fbfd5d
--- /dev/null
+++ b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/README.md
@@ -0,0 +1,131 @@
+# MiDaS for ROS1 by using LibTorch in C++
+
+### Requirements
+
+- Ubuntu 17.10 / 18.04 / 20.04, Debian Stretch
+- ROS Melodic for Ubuntu (17.10 / 18.04) / Debian Stretch, ROS Noetic for Ubuntu 20.04
+- C++11
+- LibTorch >= 1.6
+
+## Quick Start with a MiDaS Example
+
+MiDaS is a neural network to compute depth from a single image.
+
+* input from `image_topic`: `sensor_msgs/Image` - `RGB8` image with any shape
+* output to `midas_topic`: `sensor_msgs/Image` - `TYPE_32FC1` inverse relative depth maps in range [0 - 255] with original size and channels=1
+
+### Install Dependencies
+
+* install ROS Melodic for Ubuntu 17.10 / 18.04:
+```bash
+wget https://raw.githubusercontent.com/isl-org/MiDaS/master/ros/additions/install_ros_melodic_ubuntu_17_18.sh
+./install_ros_melodic_ubuntu_17_18.sh
+```
+
+or Noetic for Ubuntu 20.04:
+
+```bash
+wget https://raw.githubusercontent.com/isl-org/MiDaS/master/ros/additions/install_ros_noetic_ubuntu_20.sh
+./install_ros_noetic_ubuntu_20.sh
+```
+
+
+* install LibTorch 1.7 with CUDA 11.0:
+
+On **Jetson (ARM)**:
+```bash
+wget https://nvidia.box.com/shared/static/wa34qwrwtk9njtyarwt5nvo6imenfy26.whl -O torch-1.7.0-cp36-cp36m-linux_aarch64.whl
+sudo apt-get install python3-pip libopenblas-base libopenmpi-dev
+pip3 install Cython
+pip3 install numpy torch-1.7.0-cp36-cp36m-linux_aarch64.whl
+```
+Or compile LibTorch from source: https://github.com/pytorch/pytorch#from-source
+
+On **Linux (x86_64)**:
+```bash
+cd ~/
+wget https://download.pytorch.org/libtorch/cu110/libtorch-cxx11-abi-shared-with-deps-1.7.0%2Bcu110.zip
+unzip libtorch-cxx11-abi-shared-with-deps-1.7.0+cu110.zip
+```
+
+* create symlink for OpenCV:
+
+```bash
+sudo ln -s /usr/include/opencv4 /usr/include/opencv
+```
+
+* download and install MiDaS:
+
+```bash
+source ~/.bashrc
+cd ~/
+mkdir catkin_ws
+cd catkin_ws
+git clone https://github.com/isl-org/MiDaS
+mkdir src
+cp -r MiDaS/ros/* src
+
+chmod +x src/additions/*.sh
+chmod +x src/*.sh
+chmod +x src/midas_cpp/scripts/*.py
+cp src/additions/do_catkin_make.sh ./do_catkin_make.sh
+./do_catkin_make.sh
+./src/additions/downloads.sh
+```
+
+### Usage
+
+* run only `midas` node: `~/catkin_ws/src/launch_midas_cpp.sh`
+
+#### Test
+
+* Test - capture video and show result in the window:
+ * place any `test.mp4` video file to the directory `~/catkin_ws/src/`
+ * run `midas` node: `~/catkin_ws/src/launch_midas_cpp.sh`
+ * run test nodes in another terminal: `cd ~/catkin_ws/src && ./run_talker_listener_test.sh` and wait 30 seconds
+
+ (to use Python 2, run command `sed -i 's/python3/python2/' ~/catkin_ws/src/midas_cpp/scripts/*.py` )
+
+## Mobile version of MiDaS - Monocular Depth Estimation
+
+### Accuracy
+
+* MiDaS v2 small - ResNet50 default-decoder 384x384
+* MiDaS v2.1 small - EfficientNet-Lite3 small-decoder 256x256
+
+**Zero-shot error** (the lower - the better):
+
+| Model | DIW WHDR | Eth3d AbsRel | Sintel AbsRel | Kitti δ>1.25 | NyuDepthV2 δ>1.25 | TUM δ>1.25 |
+|---|---|---|---|---|---|---|
+| MiDaS v2 small 384x384 | **0.1248** | 0.1550 | **0.3300** | **21.81** | 15.73 | 17.00 |
+| MiDaS v2.1 small 256x256 | 0.1344 | **0.1344** | 0.3370 | 29.27 | **13.43** | **14.53** |
+| Relative improvement, % | -8 % | **+13 %** | -2 % | -34 % | **+15 %** | **+15 %** |
+
+None of the Train/Valid/Test subsets of the datasets (DIW, Eth3d, Sintel, Kitti, NyuDepthV2, TUM) were involved in Training or Fine Tuning.
+
+### Inference speed (FPS) on nVidia GPU
+
+Inference speed excluding pre and post processing, batch=1, **Frames Per Second** (the higher - the better):
+
+| Model | Jetson Nano, FPS | RTX 2080Ti, FPS |
+|---|---|---|
+| MiDaS v2 small 384x384 | 1.6 | 117 |
+| MiDaS v2.1 small 256x256 | 8.1 | 232 |
+| SpeedUp, X times | **5x** | **2x** |
+
+### Citation
+
+This repository contains code to compute depth from a single image. It accompanies our [paper](https://arxiv.org/abs/1907.01341v3):
+
+>Towards Robust Monocular Depth Estimation: Mixing Datasets for Zero-shot Cross-dataset Transfer
+René Ranftl, Katrin Lasinger, David Hafner, Konrad Schindler, Vladlen Koltun
+
+Please cite our paper if you use this code or any of the models:
+```
+@article{Ranftl2020,
+ author = {Ren\'{e} Ranftl and Katrin Lasinger and David Hafner and Konrad Schindler and Vladlen Koltun},
+ title = {Towards Robust Monocular Depth Estimation: Mixing Datasets for Zero-shot Cross-dataset Transfer},
+ journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI)},
+ year = {2020},
+}
+```
diff --git a/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/additions/do_catkin_make.sh b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/additions/do_catkin_make.sh
new file mode 100644
index 0000000000000000000000000000000000000000..291354217538efebf3464a77573f17961e81399d
--- /dev/null
+++ b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/additions/do_catkin_make.sh
@@ -0,0 +1,5 @@
+mkdir src
+catkin_make
+source devel/setup.bash
+echo $ROS_PACKAGE_PATH
+chmod +x ./devel/setup.bash
diff --git a/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/additions/downloads.sh b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/additions/downloads.sh
new file mode 100644
index 0000000000000000000000000000000000000000..6d41fb4d621e71075e70d716d060f78a465ce943
--- /dev/null
+++ b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/additions/downloads.sh
@@ -0,0 +1,5 @@
+mkdir ~/.ros
+wget https://github.com/isl-org/MiDaS/releases/download/v2_1/model-small-traced.pt
+cp ./model-small-traced.pt ~/.ros/model-small-traced.pt
+
+
diff --git a/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/additions/install_ros_melodic_ubuntu_17_18.sh b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/additions/install_ros_melodic_ubuntu_17_18.sh
new file mode 100644
index 0000000000000000000000000000000000000000..15630cf4ee85a415d932dc6ed3da7e7da8dad6a9
--- /dev/null
+++ b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/additions/install_ros_melodic_ubuntu_17_18.sh
@@ -0,0 +1,34 @@
+#@title { display-mode: "code" }
+
+#from http://wiki.ros.org/indigo/Installation/Ubuntu
+
+#1.2 Setup sources.list
+sudo sh -c 'echo "deb http://packages.ros.org/ros/ubuntu $(lsb_release -sc) main" > /etc/apt/sources.list.d/ros-latest.list'
+
+# 1.3 Setup keys
+sudo apt-key adv --keyserver 'hkp://keyserver.ubuntu.com:80' --recv-key C1CF6E31E6BADE8868B172B4F42ED6FBAB17C654
+sudo apt-key adv --keyserver 'hkp://ha.pool.sks-keyservers.net:80' --recv-key 421C365BD9FF1F717815A3895523BAEEB01FA116
+
+curl -sSL 'http://keyserver.ubuntu.com/pks/lookup?op=get&search=0xC1CF6E31E6BADE8868B172B4F42ED6FBAB17C654' | sudo apt-key add -
+
+# 1.4 Installation
+sudo apt-get update
+sudo apt-get upgrade
+
+# Desktop-Full Install:
+sudo apt-get install ros-melodic-desktop-full
+
+printf "\nsource /opt/ros/melodic/setup.bash\n" >> ~/.bashrc
+
+# 1.5 Initialize rosdep
+sudo rosdep init
+rosdep update
+
+
+# 1.7 Getting rosinstall (python)
+sudo apt-get install python-rosinstall
+sudo apt-get install python-catkin-tools
+sudo apt-get install python-rospy
+sudo apt-get install python-rosdep
+sudo apt-get install python-roscd
+sudo apt-get install python-pip
\ No newline at end of file
diff --git a/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/additions/install_ros_noetic_ubuntu_20.sh b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/additions/install_ros_noetic_ubuntu_20.sh
new file mode 100644
index 0000000000000000000000000000000000000000..4c62e758a8320ed71a60a1b9b591f0dfc21f04f4
--- /dev/null
+++ b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/additions/install_ros_noetic_ubuntu_20.sh
@@ -0,0 +1,33 @@
+#@title { display-mode: "code" }
+
+#from http://wiki.ros.org/indigo/Installation/Ubuntu
+
+#1.2 Setup sources.list
+sudo sh -c 'echo "deb http://packages.ros.org/ros/ubuntu $(lsb_release -sc) main" > /etc/apt/sources.list.d/ros-latest.list'
+
+# 1.3 Setup keys
+sudo apt-key adv --keyserver 'hkp://keyserver.ubuntu.com:80' --recv-key C1CF6E31E6BADE8868B172B4F42ED6FBAB17C654
+
+curl -sSL 'http://keyserver.ubuntu.com/pks/lookup?op=get&search=0xC1CF6E31E6BADE8868B172B4F42ED6FBAB17C654' | sudo apt-key add -
+
+# 1.4 Installation
+sudo apt-get update
+sudo apt-get upgrade
+
+# Desktop-Full Install:
+sudo apt-get install ros-noetic-desktop-full
+
+printf "\nsource /opt/ros/noetic/setup.bash\n" >> ~/.bashrc
+
+# 1.5 Initialize rosdep
+sudo rosdep init
+rosdep update
+
+
+# 1.7 Getting rosinstall (python)
+sudo apt-get install python3-rosinstall
+sudo apt-get install python3-catkin-tools
+sudo apt-get install python3-rospy
+sudo apt-get install python3-rosdep
+sudo apt-get install python3-roscd
+sudo apt-get install python3-pip
\ No newline at end of file
diff --git a/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/additions/make_package_cpp.sh b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/additions/make_package_cpp.sh
new file mode 100644
index 0000000000000000000000000000000000000000..82781ac1ecbdde527feb7dc3f5cd7d97a0ea1886
--- /dev/null
+++ b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/additions/make_package_cpp.sh
@@ -0,0 +1,16 @@
+cd ~/catkin_ws/src
+catkin_create_pkg midas_cpp std_msgs roscpp cv_bridge sensor_msgs image_transport
+cd ~/catkin_ws
+catkin_make
+
+chmod +x ~/catkin_ws/devel/setup.bash
+printf "\nsource ~/catkin_ws/devel/setup.bash" >> ~/.bashrc
+source ~/catkin_ws/devel/setup.bash
+
+
+sudo rosdep init
+rosdep update
+#rospack depends1 midas_cpp
+roscd midas_cpp
+#cat package.xml
+#rospack depends midas_cpp
\ No newline at end of file
diff --git a/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/launch_midas_cpp.sh b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/launch_midas_cpp.sh
new file mode 100644
index 0000000000000000000000000000000000000000..63e138549417f49173107ccbb1cce40f8fd46532
--- /dev/null
+++ b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/launch_midas_cpp.sh
@@ -0,0 +1,2 @@
+source ~/catkin_ws/devel/setup.bash
+roslaunch midas_cpp midas_cpp.launch model_name:="model-small-traced.pt" input_topic:="image_topic" output_topic:="midas_topic" out_orig_size:="true"
\ No newline at end of file
diff --git a/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/midas_cpp/CMakeLists.txt b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/midas_cpp/CMakeLists.txt
new file mode 100644
index 0000000000000000000000000000000000000000..63ae893bff0e5bb113cfd9a5c24186189dcb8b9a
--- /dev/null
+++ b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/midas_cpp/CMakeLists.txt
@@ -0,0 +1,189 @@
+cmake_minimum_required(VERSION 3.0.2)
+project(midas_cpp)
+
+## Compile as C++11, supported in ROS Kinetic and newer
+# add_compile_options(-std=c++11)
+
+## Find catkin macros and libraries
+## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz)
+## is used, also find other catkin packages
+find_package(catkin REQUIRED COMPONENTS
+ cv_bridge
+ image_transport
+ roscpp
+ rospy
+ sensor_msgs
+ std_msgs
+)
+
+## System dependencies are found with CMake's conventions
+# find_package(Boost REQUIRED COMPONENTS system)
+
+list(APPEND CMAKE_PREFIX_PATH "~/libtorch")
+list(APPEND CMAKE_PREFIX_PATH "/usr/local/lib/python3.6/dist-packages/torch/lib")
+list(APPEND CMAKE_PREFIX_PATH "/usr/local/lib/python2.7/dist-packages/torch/lib")
+
+if(NOT EXISTS "~/libtorch")
+ if (EXISTS "/usr/local/lib/python3.6/dist-packages/torch")
+ include_directories(/usr/local/include)
+ include_directories(/usr/local/lib/python3.6/dist-packages/torch/include/torch/csrc/api/include)
+ include_directories(/usr/local/lib/python3.6/dist-packages/torch/include)
+
+ link_directories(/usr/local/lib)
+ link_directories(/usr/local/lib/python3.6/dist-packages/torch/lib)
+
+ set(CMAKE_PREFIX_PATH /usr/local/lib/python3.6/dist-packages/torch)
+ set(Boost_USE_MULTITHREADED ON)
+ set(Torch_DIR /usr/local/lib/python3.6/dist-packages/torch)
+
+ elseif (EXISTS "/usr/local/lib/python2.7/dist-packages/torch")
+
+ include_directories(/usr/local/include)
+ include_directories(/usr/local/lib/python2.7/dist-packages/torch/include/torch/csrc/api/include)
+ include_directories(/usr/local/lib/python2.7/dist-packages/torch/include)
+
+ link_directories(/usr/local/lib)
+ link_directories(/usr/local/lib/python2.7/dist-packages/torch/lib)
+
+ set(CMAKE_PREFIX_PATH /usr/local/lib/python2.7/dist-packages/torch)
+ set(Boost_USE_MULTITHREADED ON)
+ set(Torch_DIR /usr/local/lib/python2.7/dist-packages/torch)
+ endif()
+endif()
+
+
+
+find_package(Torch REQUIRED)
+find_package(OpenCV REQUIRED)
+include_directories( ${OpenCV_INCLUDE_DIRS} )
+
+add_executable(midas_cpp src/main.cpp)
+target_link_libraries(midas_cpp "${TORCH_LIBRARIES}" "${OpenCV_LIBS} ${catkin_LIBRARIES}")
+set_property(TARGET midas_cpp PROPERTY CXX_STANDARD 14)
+
+
+
+###################################
+## catkin specific configuration ##
+###################################
+## The catkin_package macro generates cmake config files for your package
+## Declare things to be passed to dependent projects
+## INCLUDE_DIRS: uncomment this if your package contains header files
+## LIBRARIES: libraries you create in this project that dependent projects also need
+## CATKIN_DEPENDS: catkin_packages dependent projects also need
+## DEPENDS: system dependencies of this project that dependent projects also need
+catkin_package(
+# INCLUDE_DIRS include
+# LIBRARIES midas_cpp
+# CATKIN_DEPENDS cv_bridge image_transport roscpp sensor_msgs std_msgs
+# DEPENDS system_lib
+)
+
+###########
+## Build ##
+###########
+
+## Specify additional locations of header files
+## Your package locations should be listed before other locations
+include_directories(
+# include
+ ${catkin_INCLUDE_DIRS}
+)
+
+## Declare a C++ library
+# add_library(${PROJECT_NAME}
+# src/${PROJECT_NAME}/midas_cpp.cpp
+# )
+
+## Add cmake target dependencies of the library
+## as an example, code may need to be generated before libraries
+## either from message generation or dynamic reconfigure
+# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
+
+## Declare a C++ executable
+## With catkin_make all packages are built within a single CMake context
+## The recommended prefix ensures that target names across packages don't collide
+# add_executable(${PROJECT_NAME}_node src/midas_cpp_node.cpp)
+
+## Rename C++ executable without prefix
+## The above recommended prefix causes long target names, the following renames the
+## target back to the shorter version for ease of user use
+## e.g. "rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node"
+# set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "")
+
+## Add cmake target dependencies of the executable
+## same as for the library above
+# add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
+
+## Specify libraries to link a library or executable target against
+# target_link_libraries(${PROJECT_NAME}_node
+# ${catkin_LIBRARIES}
+# )
+
+#############
+## Install ##
+#############
+
+# all install targets should use catkin DESTINATION variables
+# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html
+
+## Mark executable scripts (Python etc.) for installation
+## in contrast to setup.py, you can choose the destination
+# catkin_install_python(PROGRAMS
+# scripts/my_python_script
+# DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
+# )
+
+## Mark executables for installation
+## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html
+# install(TARGETS ${PROJECT_NAME}_node
+# RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
+# )
+
+## Mark libraries for installation
+## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html
+# install(TARGETS ${PROJECT_NAME}
+# ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
+# LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
+# RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION}
+# )
+
+## Mark cpp header files for installation
+# install(DIRECTORY include/${PROJECT_NAME}/
+# DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}
+# FILES_MATCHING PATTERN "*.h"
+# PATTERN ".svn" EXCLUDE
+# )
+
+## Mark other files for installation (e.g. launch and bag files, etc.)
+# install(FILES
+# # myfile1
+# # myfile2
+# DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}
+# )
+
+#############
+## Testing ##
+#############
+
+## Add gtest based cpp test target and link libraries
+# catkin_add_gtest(${PROJECT_NAME}-test test/test_midas_cpp.cpp)
+# if(TARGET ${PROJECT_NAME}-test)
+# target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME})
+# endif()
+
+## Add folders to be run by python nosetests
+# catkin_add_nosetests(test)
+
+install(TARGETS ${PROJECT_NAME}
+ ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
+ LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
+ RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
+)
+
+add_custom_command(
+ TARGET midas_cpp POST_BUILD
+ COMMAND ${CMAKE_COMMAND} -E copy
+ ${CMAKE_CURRENT_BINARY_DIR}/midas_cpp
+ ${CMAKE_SOURCE_DIR}/midas_cpp
+)
\ No newline at end of file
diff --git a/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/midas_cpp/launch/midas_cpp.launch b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/midas_cpp/launch/midas_cpp.launch
new file mode 100644
index 0000000000000000000000000000000000000000..326b7dfb19e928a9199a633c45e630dd2ef22bc0
--- /dev/null
+++ b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/midas_cpp/launch/midas_cpp.launch
@@ -0,0 +1,19 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/midas_cpp/launch/midas_talker_listener.launch b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/midas_cpp/launch/midas_talker_listener.launch
new file mode 100644
index 0000000000000000000000000000000000000000..88d10c59b53b7031ef5c35424317b86991f2af8a
--- /dev/null
+++ b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/midas_cpp/launch/midas_talker_listener.launch
@@ -0,0 +1,23 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/midas_cpp/package.xml b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/midas_cpp/package.xml
new file mode 100644
index 0000000000000000000000000000000000000000..6a0d109e65dab0b7652bba809074ae012a175b7e
--- /dev/null
+++ b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/midas_cpp/package.xml
@@ -0,0 +1,77 @@
+
+
+ midas_cpp
+ 0.1.0
+ The midas_cpp package
+
+ Alexey Bochkovskiy
+ MIT
+ https://github.com/isl-org/MiDaS/tree/master/ros
+
+
+
+
+
+
+ TODO
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ catkin
+ cv_bridge
+ image_transport
+ roscpp
+ rospy
+ sensor_msgs
+ std_msgs
+ cv_bridge
+ image_transport
+ roscpp
+ rospy
+ sensor_msgs
+ std_msgs
+ cv_bridge
+ image_transport
+ roscpp
+ rospy
+ sensor_msgs
+ std_msgs
+
+
+
+
+
+
+
+
diff --git a/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/midas_cpp/scripts/listener.py b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/midas_cpp/scripts/listener.py
new file mode 100644
index 0000000000000000000000000000000000000000..7ad3f437dedd3759dc6d288d18499c93581bca26
--- /dev/null
+++ b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/midas_cpp/scripts/listener.py
@@ -0,0 +1,61 @@
+#!/usr/bin/env python3
+from __future__ import print_function
+
+import roslib
+#roslib.load_manifest('my_package')
+import sys
+import rospy
+import cv2
+import numpy as np
+from std_msgs.msg import String
+from sensor_msgs.msg import Image
+from cv_bridge import CvBridge, CvBridgeError
+
+class video_show:
+
+ def __init__(self):
+ self.show_output = rospy.get_param('~show_output', True)
+ self.save_output = rospy.get_param('~save_output', False)
+ self.output_video_file = rospy.get_param('~output_video_file','result.mp4')
+ # rospy.loginfo(f"Listener - params: show_output={self.show_output}, save_output={self.save_output}, output_video_file={self.output_video_file}")
+
+ self.bridge = CvBridge()
+ self.image_sub = rospy.Subscriber("midas_topic", Image, self.callback)
+
+ def callback(self, data):
+ try:
+ cv_image = self.bridge.imgmsg_to_cv2(data)
+ except CvBridgeError as e:
+ print(e)
+ return
+
+ if cv_image.size == 0:
+ return
+
+ rospy.loginfo("Listener: Received new frame")
+ cv_image = cv_image.astype("uint8")
+
+ if self.show_output==True:
+ cv2.imshow("video_show", cv_image)
+ cv2.waitKey(10)
+
+ if self.save_output==True:
+ if self.video_writer_init==False:
+ fourcc = cv2.VideoWriter_fourcc(*'XVID')
+ self.out = cv2.VideoWriter(self.output_video_file, fourcc, 25, (cv_image.shape[1], cv_image.shape[0]))
+
+ self.out.write(cv_image)
+
+
+
+def main(args):
+ rospy.init_node('listener', anonymous=True)
+ ic = video_show()
+ try:
+ rospy.spin()
+ except KeyboardInterrupt:
+ print("Shutting down")
+ cv2.destroyAllWindows()
+
+if __name__ == '__main__':
+ main(sys.argv)
\ No newline at end of file
diff --git a/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/midas_cpp/scripts/listener_original.py b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/midas_cpp/scripts/listener_original.py
new file mode 100644
index 0000000000000000000000000000000000000000..59ec1be8f8c35559ca8938fb99ac81a14a494a4a
--- /dev/null
+++ b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/midas_cpp/scripts/listener_original.py
@@ -0,0 +1,61 @@
+#!/usr/bin/env python3
+from __future__ import print_function
+
+import roslib
+#roslib.load_manifest('my_package')
+import sys
+import rospy
+import cv2
+import numpy as np
+from std_msgs.msg import String
+from sensor_msgs.msg import Image
+from cv_bridge import CvBridge, CvBridgeError
+
+class video_show:
+
+ def __init__(self):
+ self.show_output = rospy.get_param('~show_output', True)
+ self.save_output = rospy.get_param('~save_output', False)
+ self.output_video_file = rospy.get_param('~output_video_file','result.mp4')
+ # rospy.loginfo(f"Listener original - params: show_output={self.show_output}, save_output={self.save_output}, output_video_file={self.output_video_file}")
+
+ self.bridge = CvBridge()
+ self.image_sub = rospy.Subscriber("image_topic", Image, self.callback)
+
+ def callback(self, data):
+ try:
+ cv_image = self.bridge.imgmsg_to_cv2(data)
+ except CvBridgeError as e:
+ print(e)
+ return
+
+ if cv_image.size == 0:
+ return
+
+ rospy.loginfo("Listener_original: Received new frame")
+ cv_image = cv_image.astype("uint8")
+
+ if self.show_output==True:
+ cv2.imshow("video_show_orig", cv_image)
+ cv2.waitKey(10)
+
+ if self.save_output==True:
+ if self.video_writer_init==False:
+ fourcc = cv2.VideoWriter_fourcc(*'XVID')
+ self.out = cv2.VideoWriter(self.output_video_file, fourcc, 25, (cv_image.shape[1], cv_image.shape[0]))
+
+ self.out.write(cv_image)
+
+
+
+def main(args):
+ rospy.init_node('listener_original', anonymous=True)
+ ic = video_show()
+ try:
+ rospy.spin()
+ except KeyboardInterrupt:
+ print("Shutting down")
+ cv2.destroyAllWindows()
+
+if __name__ == '__main__':
+ main(sys.argv)
\ No newline at end of file
diff --git a/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/midas_cpp/scripts/talker.py b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/midas_cpp/scripts/talker.py
new file mode 100644
index 0000000000000000000000000000000000000000..ddbeabf25ad41f5a765bdfa6cfaa3102331d1d7c
--- /dev/null
+++ b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/midas_cpp/scripts/talker.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python3
+
+
+import roslib
+#roslib.load_manifest('my_package')
+import sys
+import rospy
+import cv2
+from std_msgs.msg import String
+from sensor_msgs.msg import Image
+from cv_bridge import CvBridge, CvBridgeError
+
+
def talker():
    """Publish video frames on 'image_topic' at up to 30 Hz.

    Private ROS parameters:
      ~use_camera (bool): read from camera 0 instead of a file (default False).
      ~input_video_file (str): video path used when ~use_camera is False
        (default 'test.mp4').
    """
    rospy.init_node('talker', anonymous=True)

    use_camera = rospy.get_param('~use_camera', False)
    input_video_file = rospy.get_param('~input_video_file','test.mp4')
    # rospy.loginfo(f"Talker - params: use_camera={use_camera}, input_video_file={input_video_file}")

    # rospy.loginfo("Talker: Trying to open a video stream")
    if use_camera:
        cap = cv2.VideoCapture(0)
    else:
        cap = cv2.VideoCapture(input_video_file)

    pub = rospy.Publisher('image_topic', Image, queue_size=1)
    rate = rospy.Rate(30) # 30hz
    bridge = CvBridge()

    try:
        while not rospy.is_shutdown():
            ret, cv_image = cap.read()
            if not ret:
                print("Talker: Video is over")
                rospy.loginfo("Video is over")
                return

            try:
                image = bridge.cv2_to_imgmsg(cv_image, "bgr8")
            except CvBridgeError as e:
                # logerr uses printf-style formatting; the original passed the
                # exception as an extra argument with no placeholder.
                rospy.logerr("Talker: cv2image conversion failed: %s", e)
                print(e)
                continue

            rospy.loginfo("Talker: Publishing frame")
            pub.publish(image)
            rate.sleep()
    finally:
        cap.release()  # release the camera/file handle on every exit path

if __name__ == '__main__':
    try:
        talker()
    except rospy.ROSInterruptException:
        pass
diff --git a/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/midas_cpp/src/main.cpp b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/midas_cpp/src/main.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..5ecc0c8a3232b933337c7512dfd48b4b4d01b157
--- /dev/null
+++ b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/midas_cpp/src/main.cpp
@@ -0,0 +1,285 @@
// NOTE(review): the original #include directives lost their header names
// during extraction; reconstructed from the identifiers used below —
// confirm against the upstream MiDaS ros/midas_cpp/src/main.cpp.
#include <ros/ros.h>
#include <image_transport/image_transport.h>
#include <cv_bridge/cv_bridge.h>
#include <sensor_msgs/image_encodings.h>

#include <memory>

#include <torch/script.h> // One-stop header.

#include <iostream>
#include <string>
#include <vector>
#include <limits>

#include <chrono>
#include <thread>

// includes for OpenCV >= 3.x
#ifndef CV_VERSION_EPOCH
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#endif

// OpenCV includes for OpenCV 2.x
#ifdef CV_VERSION_EPOCH
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/contrib/contrib.hpp>
#endif
+
+static const std::string OPENCV_WINDOW = "Image window";
+
+class Midas
+{
+ ros::NodeHandle nh_;
+ image_transport::ImageTransport it_;
+ image_transport::Subscriber image_sub_;
+ image_transport::Publisher image_pub_;
+
+ torch::jit::script::Module module;
+ torch::Device device;
+
+ auto ToTensor(cv::Mat img, bool show_output = false, bool unsqueeze = false, int unsqueeze_dim = 0)
+ {
+ //std::cout << "image shape: " << img.size() << std::endl;
+ at::Tensor tensor_image = torch::from_blob(img.data, { img.rows, img.cols, 3 }, at::kByte);
+
+ if (unsqueeze)
+ {
+ tensor_image.unsqueeze_(unsqueeze_dim);
+ //std::cout << "tensors new shape: " << tensor_image.sizes() << std::endl;
+ }
+
+ if (show_output)
+ {
+ std::cout << tensor_image.slice(2, 0, 1) << std::endl;
+ }
+ //std::cout << "tenor shape: " << tensor_image.sizes() << std::endl;
+ return tensor_image;
+ }
+
+ auto ToInput(at::Tensor tensor_image)
+ {
+ // Create a vector of inputs.
+ return std::vector{tensor_image};
+ }
+
+ auto ToCvImage(at::Tensor tensor, int cv_type = CV_8UC3)
+ {
+ int width = tensor.sizes()[0];
+ int height = tensor.sizes()[1];
+ try
+ {
+ cv::Mat output_mat;
+ if (cv_type == CV_8UC4 || cv_type == CV_8UC3 || cv_type == CV_8UC2 || cv_type == CV_8UC1) {
+ cv::Mat cv_image(cv::Size{ height, width }, cv_type, tensor.data_ptr());
+ output_mat = cv_image;
+ }
+ else if (cv_type == CV_32FC4 || cv_type == CV_32FC3 || cv_type == CV_32FC2 || cv_type == CV_32FC1) {
+ cv::Mat cv_image(cv::Size{ height, width }, cv_type, tensor.data_ptr());
+ output_mat = cv_image;
+ }
+ else if (cv_type == CV_64FC4 || cv_type == CV_64FC3 || cv_type == CV_64FC2 || cv_type == CV_64FC1) {
+ cv::Mat cv_image(cv::Size{ height, width }, cv_type, tensor.data_ptr());
+ output_mat = cv_image;
+ }
+
+ //show_image(output_mat, "converted image from tensor");
+ return output_mat.clone();
+ }
+ catch (const c10::Error& e)
+ {
+ std::cout << "an error has occured : " << e.msg() << std::endl;
+ }
+ return cv::Mat(height, width, CV_8UC3);
+ }
+
+ std::string input_topic, output_topic, model_name;
+ bool out_orig_size;
+ int net_width, net_height;
+ torch::NoGradGuard guard;
+ at::Tensor mean, std;
+ at::Tensor output, tensor;
+
+public:
+ Midas()
+ : nh_(), it_(nh_), device(torch::Device(torch::kCPU))
+ {
+ ros::param::param("~input_topic", input_topic, "image_topic");
+ ros::param::param("~output_topic", output_topic, "midas_topic");
+ ros::param::param("~model_name", model_name, "model-small-traced.pt");
+ ros::param::param("~out_orig_size", out_orig_size, true);
+ ros::param::param("~net_width", net_width, 256);
+ ros::param::param("~net_height", net_height, 256);
+
+ std::cout << ", input_topic = " << input_topic <<
+ ", output_topic = " << output_topic <<
+ ", model_name = " << model_name <<
+ ", out_orig_size = " << out_orig_size <<
+ ", net_width = " << net_width <<
+ ", net_height = " << net_height <<
+ std::endl;
+
+ // Subscrive to input video feed and publish output video feed
+ image_sub_ = it_.subscribe(input_topic, 1, &Midas::imageCb, this);
+ image_pub_ = it_.advertise(output_topic, 1);
+
+ std::cout << "Try to load torchscript model \n";
+
+ try {
+ // Deserialize the ScriptModule from a file using torch::jit::load().
+ module = torch::jit::load(model_name);
+ }
+ catch (const c10::Error& e) {
+ std::cerr << "error loading the model\n";
+ exit(0);
+ }
+
+ std::cout << "ok\n";
+
+ try {
+ module.eval();
+ torch::jit::getProfilingMode() = false;
+ torch::jit::setGraphExecutorOptimize(true);
+
+ mean = torch::tensor({ 0.485, 0.456, 0.406 });
+ std = torch::tensor({ 0.229, 0.224, 0.225 });
+
+ if (torch::hasCUDA()) {
+ std::cout << "cuda is available" << std::endl;
+ at::globalContext().setBenchmarkCuDNN(true);
+ device = torch::Device(torch::kCUDA);
+ module.to(device);
+ mean = mean.to(device);
+ std = std.to(device);
+ }
+ }
+ catch (const c10::Error& e)
+ {
+ std::cerr << " module initialization: " << e.msg() << std::endl;
+ }
+ }
+
+ ~Midas()
+ {
+ }
+
+ void imageCb(const sensor_msgs::ImageConstPtr& msg)
+ {
+ cv_bridge::CvImagePtr cv_ptr;
+ try
+ {
+ // sensor_msgs::Image to cv::Mat
+ cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::RGB8);
+ }
+ catch (cv_bridge::Exception& e)
+ {
+ ROS_ERROR("cv_bridge exception: %s", e.what());
+ return;
+ }
+
+ // pre-processing
+ auto tensor_cpu = ToTensor(cv_ptr->image); // OpenCV-image -> Libtorch-tensor
+
+ try {
+ tensor = tensor_cpu.to(device); // move to device (CPU or GPU)
+
+ tensor = tensor.toType(c10::kFloat);
+ tensor = tensor.permute({ 2, 0, 1 }); // HWC -> CHW
+ tensor = tensor.unsqueeze(0);
+ tensor = at::upsample_bilinear2d(tensor, { net_height, net_width }, true); // resize
+ tensor = tensor.squeeze(0);
+ tensor = tensor.permute({ 1, 2, 0 }); // CHW -> HWC
+
+ tensor = tensor.div(255).sub(mean).div(std); // normalization
+ tensor = tensor.permute({ 2, 0, 1 }); // HWC -> CHW
+ tensor.unsqueeze_(0); // CHW -> NCHW
+ }
+ catch (const c10::Error& e)
+ {
+ std::cerr << " pre-processing exception: " << e.msg() << std::endl;
+ return;
+ }
+
+ auto input_to_net = ToInput(tensor); // input to the network
+
+ // inference
+ output;
+ try {
+ output = module.forward(input_to_net).toTensor(); // run inference
+ }
+ catch (const c10::Error& e)
+ {
+ std::cerr << " module.forward() exception: " << e.msg() << std::endl;
+ return;
+ }
+
+ output = output.detach().to(torch::kF32);
+
+ // move to CPU temporary
+ at::Tensor output_tmp = output;
+ output_tmp = output_tmp.to(torch::kCPU);
+
+ // normalization
+ float min_val = std::numeric_limits::max();
+ float max_val = std::numeric_limits::min();
+
+ for (int i = 0; i < net_width * net_height; ++i) {
+ float val = output_tmp.data_ptr()[i];
+ if (min_val > val) min_val = val;
+ if (max_val < val) max_val = val;
+ }
+ float range_val = max_val - min_val;
+
+ output = output.sub(min_val).div(range_val).mul(255.0F).clamp(0, 255).to(torch::kF32); // .to(torch::kU8);
+
+ // resize to the original size if required
+ if (out_orig_size) {
+ try {
+ output = at::upsample_bilinear2d(output.unsqueeze(0), { cv_ptr->image.size().height, cv_ptr->image.size().width }, true);
+ output = output.squeeze(0);
+ }
+ catch (const c10::Error& e)
+ {
+ std::cout << " upsample_bilinear2d() exception: " << e.msg() << std::endl;
+ return;
+ }
+ }
+ output = output.permute({ 1, 2, 0 }).to(torch::kCPU);
+
+ int cv_type = CV_32FC1; // CV_8UC1;
+ auto cv_img = ToCvImage(output, cv_type);
+
+ sensor_msgs::Image img_msg;
+
+ try {
+ // cv::Mat -> sensor_msgs::Image
+ std_msgs::Header header; // empty header
+ header.seq = 0; // user defined counter
+ header.stamp = ros::Time::now();// time
+ //cv_bridge::CvImage img_bridge = cv_bridge::CvImage(header, sensor_msgs::image_encodings::MONO8, cv_img);
+ cv_bridge::CvImage img_bridge = cv_bridge::CvImage(header, sensor_msgs::image_encodings::TYPE_32FC1, cv_img);
+
+ img_bridge.toImageMsg(img_msg); // cv_bridge -> sensor_msgs::Image
+ }
+ catch (cv_bridge::Exception& e)
+ {
+ ROS_ERROR("cv_bridge exception: %s", e.what());
+ return;
+ }
+
+ // Output modified video stream
+ image_pub_.publish(img_msg);
+ }
+};
+
+int main(int argc, char** argv)
+{
+ ros::init(argc, argv, "midas", ros::init_options::AnonymousName);
+ Midas ic;
+ ros::spin();
+ return 0;
+}
\ No newline at end of file
diff --git a/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/run_talker_listener_test.sh b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/run_talker_listener_test.sh
new file mode 100644
index 0000000000000000000000000000000000000000..181447a83afc9c273ab910cb7e71526b99069e9f
--- /dev/null
+++ b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/run_talker_listener_test.sh
@@ -0,0 +1,16 @@
+# place any test.mp4 file near with this file
+
+# roscore
+# rosnode kill -a
+
+source ~/catkin_ws/devel/setup.bash
+
+roscore &
+P1=$!
+rosrun midas_cpp talker.py &
+P2=$!
+rosrun midas_cpp listener_original.py &
+P3=$!
+rosrun midas_cpp listener.py &
+P4=$!
+wait $P1 $P2 $P3 $P4
\ No newline at end of file
diff --git a/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/tf/README.md b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/tf/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..dca3cf8efd6110c698e218759cc7ef0a83980c0d
--- /dev/null
+++ b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/tf/README.md
@@ -0,0 +1,147 @@
+## Towards Robust Monocular Depth Estimation: Mixing Datasets for Zero-shot Cross-dataset Transfer
+
+### TensorFlow inference using `.pb` and `.onnx` models
+
1. [Run inference on TensorFlow-model by using TensorFlow](#run-inference-on-tensorflow-model-by-using-tensorflow)
+
2. [Run inference on ONNX-model by using ONNX-Runtime](#run-inference-on-onnx-model-by-using-onnx-runtime)
+
+3. [Make ONNX model from downloaded Pytorch model file](#make-onnx-model-from-downloaded-pytorch-model-file)
+
+
+### Run inference on TensorFlow-model by using TensorFlow
+
+1) Download the model weights [model-f6b98070.pb](https://github.com/isl-org/MiDaS/releases/download/v2_1/model-f6b98070.pb)
+and [model-small.pb](https://github.com/isl-org/MiDaS/releases/download/v2_1/model-small.pb) and place the
+file in the `/tf/` folder.
+
+2) Set up dependencies:
+
+```shell
+# install OpenCV
+pip install --upgrade pip
+pip install opencv-python
+
+# install TensorFlow
+pip install -I grpcio tensorflow==2.3.0 tensorflow-addons==0.11.2 numpy==1.18.0
+```
+
+#### Usage
+
+1) Place one or more input images in the folder `tf/input`.
+
+2) Run the model:
+
+ ```shell
+ python tf/run_pb.py
+ ```
+
+ Or run the small model:
+
+ ```shell
+ python tf/run_pb.py --model_weights model-small.pb --model_type small
+ ```
+
+3) The resulting inverse depth maps are written to the `tf/output` folder.
+
+
+### Run inference on ONNX-model by using ONNX-Runtime
+
+1) Download the model weights [model-f6b98070.onnx](https://github.com/isl-org/MiDaS/releases/download/v2_1/model-f6b98070.onnx)
+and [model-small.onnx](https://github.com/isl-org/MiDaS/releases/download/v2_1/model-small.onnx) and place the
+file in the `/tf/` folder.
+
+2) Set up dependencies:
+
+```shell
+# install OpenCV
+pip install --upgrade pip
+pip install opencv-python
+
+# install ONNX
+pip install onnx==1.7.0
+
+# install ONNX Runtime
+pip install onnxruntime==1.5.2
+```
+
+#### Usage
+
+1) Place one or more input images in the folder `tf/input`.
+
+2) Run the model:
+
+ ```shell
+ python tf/run_onnx.py
+ ```
+
+ Or run the small model:
+
+ ```shell
+ python tf/run_onnx.py --model_weights model-small.onnx --model_type small
+ ```
+
+3) The resulting inverse depth maps are written to the `tf/output` folder.
+
+
+
+### Make ONNX model from downloaded Pytorch model file
+
+1) Download the model weights [model-f6b98070.pt](https://github.com/isl-org/MiDaS/releases/download/v2_1/model-f6b98070.pt) and place the
+file in the root folder.
+
+2) Set up dependencies:
+
+```shell
+# install OpenCV
+pip install --upgrade pip
+pip install opencv-python
+
+# install PyTorch TorchVision
+pip install -I torch==1.7.0 torchvision==0.8.0
+
+# install TensorFlow
+pip install -I grpcio tensorflow==2.3.0 tensorflow-addons==0.11.2 numpy==1.18.0
+
+# install ONNX
+pip install onnx==1.7.0
+
+# install ONNX-TensorFlow
+git clone https://github.com/onnx/onnx-tensorflow.git
+cd onnx-tensorflow
+git checkout 095b51b88e35c4001d70f15f80f31014b592b81e
+pip install -e .
+```
+
+#### Usage
+
+1) Run the converter:
+
+ ```shell
+ python tf/make_onnx_model.py
+ ```
+
+2) The resulting `model-f6b98070.onnx` file is written to the `/tf/` folder.
+
+
+### Requirements
+
+ The code was tested with Python 3.6.9, PyTorch 1.5.1, TensorFlow 2.2.0, TensorFlow-addons 0.8.3, ONNX 1.7.0, ONNX-TensorFlow (GitHub-master-17.07.2020) and OpenCV 4.3.0.
+
+### Citation
+
+Please cite our paper if you use this code or any of the models:
+```
+@article{Ranftl2019,
+ author = {Ren\'{e} Ranftl and Katrin Lasinger and David Hafner and Konrad Schindler and Vladlen Koltun},
+ title = {Towards Robust Monocular Depth Estimation: Mixing Datasets for Zero-shot Cross-dataset Transfer},
+ journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI)},
+ year = {2020},
+}
+```
+
+### License
+
+MIT License
+
+
diff --git a/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/tf/input/.placeholder b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/tf/input/.placeholder
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/tf/make_onnx_model.py b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/tf/make_onnx_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..49afaee6851e453a06f3b58ba9a9558375c90164
--- /dev/null
+++ b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/tf/make_onnx_model.py
@@ -0,0 +1,112 @@
+"""Compute depth maps for images in the input folder.
+"""
+import os
+import ntpath
+import glob
+import torch
+import utils
+import cv2
+import numpy as np
+from torchvision.transforms import Compose, Normalize
+from torchvision import transforms
+
+from shutil import copyfile
+import fileinput
+import sys
+sys.path.append(os.getcwd() + '/..')
+
def modify_file():
    """Temporarily patch ../midas/blocks.py into an ONNX-exportable form.

    A .bak backup is written next to the file; restore_file() undoes this.
    """
    target = '../midas/blocks.py'
    copyfile(target, target + '.bak')

    with open(target, 'r') as fh:
        text = fh.read()

    # (pattern, replacement) pairs applied in order
    replacements = (
        ('align_corners=True', 'align_corners=False'),
        ('import torch.nn as nn',
         'import torch.nn as nn\nimport torchvision.models as models'),
        ('torch.hub.load("facebookresearch/WSL-Images", "resnext101_32x8d_wsl")',
         'models.resnext101_32x8d()'),
    )
    for old, new in replacements:
        text = text.replace(old, new)

    with open(target, 'w') as fh:
        fh.write(text)
+
def restore_file():
    """Undo modify_file() by restoring the .bak copy of ../midas/blocks.py."""
    original = '../midas/blocks.py'
    copyfile(original + '.bak', original)
+
# Patch midas/blocks.py on disk, import the (patched) model code, then
# immediately restore the original file — the import binds the exportable
# variant while leaving the source tree untouched afterwards.
modify_file()

from midas.midas_net import MidasNet
from midas.transforms import Resize, NormalizeImage, PrepareForNet

restore_file()
+
+
class MidasNet_preprocessing(MidasNet):
    """Network for monocular depth estimation.

    Bakes ImageNet mean/std normalization into forward() so the exported
    ONNX graph accepts raw [0, 1] images directly.
    """
    def forward(self, x):
        """Forward pass.

        Args:
            x (tensor): input data (image)

        Returns:
            tensor: depth
        """

        mean = torch.tensor([0.485, 0.456, 0.406])
        std = torch.tensor([0.229, 0.224, 0.225])
        # NOTE(review): sub_/div_ modify the caller's tensor in place;
        # the [None, :, None, None] indexing presumes NCHW input so the
        # constants broadcast over the channel dimension — confirm.
        x.sub_(mean[None, :, None, None]).div_(std[None, :, None, None])

        return MidasNet.forward(self, x)
+
+
def run(model_path):
    """Export a MiDaS checkpoint to ONNX.

    Args:
        model_path (str): path to the saved PyTorch model (.pt); the .onnx
            file is written to the current directory under the same stem.
    """
    print("initialize")

    # load network (the preprocessing subclass bakes normalization into the graph)
    #model = MidasNet(model_path, non_negative=True)
    model = MidasNet_preprocessing(model_path, non_negative=True)

    model.eval()

    print("start processing")

    # dummy CHW input used for the sanity forward pass and the export trace
    img_input = np.zeros((3, 384, 384), np.float32)

    # compute (the interpolated prediction itself is not used for export)
    with torch.no_grad():
        sample = torch.from_numpy(img_input).unsqueeze(0)
        prediction = model.forward(sample)
        prediction = (
            torch.nn.functional.interpolate(
                prediction.unsqueeze(1),
                # img_input is CHW, so the spatial size is shape[1:]; the
                # original passed shape[:2], i.e. (channels, height).
                size=img_input.shape[1:],
                mode="bicubic",
                align_corners=False,
            )
            .squeeze()
            .cpu()
            .numpy()
        )

    torch.onnx.export(model, sample, ntpath.basename(model_path).rsplit('.', 1)[0]+'.onnx', opset_version=9)

    print("finished")


if __name__ == "__main__":
    # set paths
    # MODEL_PATH = "model.pt"
    MODEL_PATH = "../model-f6b98070.pt"

    # compute depth maps
    run(MODEL_PATH)
diff --git a/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/tf/output/.placeholder b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/tf/output/.placeholder
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/tf/run_onnx.py b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/tf/run_onnx.py
new file mode 100644
index 0000000000000000000000000000000000000000..48587111c3e6a7cc97674f3bb35ebbf222a7123d
--- /dev/null
+++ b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/tf/run_onnx.py
@@ -0,0 +1,119 @@
+"""Compute depth maps for images in the input folder.
+"""
+import os
+import glob
+import utils
+import cv2
+import sys
+import numpy as np
+import argparse
+
+import onnx
+import onnxruntime as rt
+
+from transforms import Resize, NormalizeImage, PrepareForNet
+
+
def run(input_path, output_path, model_path, model_type="large"):
    """Run MonoDepthNN to compute depth maps.

    Args:
        input_path (str): path to input folder
        output_path (str): path to output folder
        model_path (str): path to saved model
        model_type (str): "large" (384x384) or "small" (256x256)
    """
    print("initialize")

    # select device
    device = "CUDA:0"
    #device = "CPU"
    print("device: %s" % device)

    # network resolution, keyed by model type
    resolutions = {"large": (384, 384), "small": (256, 256)}
    if model_type not in resolutions:
        print(f"model_type '{model_type}' not implemented, use: --model_type large")
        assert False
    net_w, net_h = resolutions[model_type]

    # load network
    print("loading model...")
    session = rt.InferenceSession(model_path)
    input_name = session.get_inputs()[0].name
    output_name = session.get_outputs()[0].name

    resize_image = Resize(
        net_w,
        net_h,
        resize_target=None,
        keep_aspect_ratio=False,
        ensure_multiple_of=32,
        resize_method="upper_bound",
        image_interpolation_method=cv2.INTER_CUBIC,
    )

    prepare = PrepareForNet()

    def transform(sample):
        # resize, then convert to contiguous CHW float32
        return prepare(resize_image(sample))

    # collect inputs and prepare the output folder
    img_names = glob.glob(os.path.join(input_path, "*"))
    num_images = len(img_names)
    os.makedirs(output_path, exist_ok=True)

    print("start processing")

    for ind, img_name in enumerate(img_names):
        print(" processing {} ({}/{})".format(img_name, ind + 1, num_images))

        # read the image and apply the network preprocessing
        img = utils.read_image(img_name)
        img_input = transform({"image": img})["image"]

        # run inference, then resize the depth map back to the input resolution
        onnx_output = session.run(
            [output_name],
            {input_name: img_input.reshape(1, 3, net_h, net_w).astype(np.float32)},
        )[0]
        prediction = np.array(onnx_output).reshape(net_h, net_w)
        prediction = cv2.resize(
            prediction, (img.shape[1], img.shape[0]), interpolation=cv2.INTER_CUBIC
        )

        # write pfm/16-bit png output next to the input's basename
        filename = os.path.join(
            output_path, os.path.splitext(os.path.basename(img_name))[0]
        )
        utils.write_depth(filename, prediction, bits=2)

    print("finished")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--input_path',
                        default='input',
                        help='folder with input images')
    parser.add_argument('-o', '--output_path',
                        default='output',
                        help='folder for output images')
    parser.add_argument('-m', '--model_weights',
                        default='model-f6b98070.onnx',
                        help='path to the trained weights of model')
    parser.add_argument('-t', '--model_type',
                        default='large',
                        help='model type: large or small')
    args = parser.parse_args()

    # compute depth maps
    run(args.input_path, args.output_path, args.model_weights, args.model_type)
diff --git a/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/tf/run_pb.py b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/tf/run_pb.py
new file mode 100644
index 0000000000000000000000000000000000000000..0f3e8cbbf433f4fe43878b221da0517f770cc81f
--- /dev/null
+++ b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/tf/run_pb.py
@@ -0,0 +1,135 @@
+"""Compute depth maps for images in the input folder.
+"""
+import os
+import glob
+import utils
+import cv2
+import argparse
+
+import tensorflow as tf
+
+from transforms import Resize, NormalizeImage, PrepareForNet
+
def run(input_path, output_path, model_path, model_type="large"):
    """Run MonoDepthNN to compute depth maps.

    Args:
        input_path (str): path to input folder
        output_path (str): path to output folder
        model_path (str): path to saved model (frozen TensorFlow graph, .pb)
        model_type (str): "large" (384x384) or "small" (256x256)
    """
    print("initialize")

    # the runtime initialization will not allocate all memory on the device to avoid out of GPU memory
    gpus = tf.config.experimental.list_physical_devices('GPU')
    if gpus:
        try:
            for gpu in gpus:
                #tf.config.experimental.set_memory_growth(gpu, True)
                tf.config.experimental.set_virtual_device_configuration(gpu,
                    [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=4000)])
        except RuntimeError as e:
            print(e)

    # network resolution
    if model_type == "large":
        net_w, net_h = 384, 384
    elif model_type == "small":
        net_w, net_h = 256, 256
    else:
        print(f"model_type '{model_type}' not implemented, use: --model_type large")
        assert False

    # load the frozen graph into the default graph
    graph_def = tf.compat.v1.GraphDef()
    with tf.io.gfile.GFile(model_path, 'rb') as f:
        graph_def.ParseFromString(f.read())
        tf.import_graph_def(graph_def, name='')

    # input tensor is named '0'; output is the graph's last operation
    model_operations = tf.compat.v1.get_default_graph().get_operations()
    input_node = '0:0'
    output_layer = model_operations[len(model_operations) - 1].name + ':0'
    print("Last layer name: ", output_layer)

    resize_image = Resize(
        net_w,
        net_h,
        resize_target=None,
        keep_aspect_ratio=False,
        ensure_multiple_of=32,
        resize_method="upper_bound",
        image_interpolation_method=cv2.INTER_CUBIC,
    )

    def compose2(f1, f2):
        return lambda x: f2(f1(x))

    transform = compose2(resize_image, PrepareForNet())

    # get input
    img_names = glob.glob(os.path.join(input_path, "*"))
    num_images = len(img_names)

    # create output folder
    os.makedirs(output_path, exist_ok=True)

    print("start processing")

    with tf.compat.v1.Session() as sess:
        try:
            # load images
            for ind, img_name in enumerate(img_names):

                print(" processing {} ({}/{})".format(img_name, ind + 1, num_images))

                # input
                img = utils.read_image(img_name)
                img_input = transform({"image": img})["image"]

                # compute
                prob_tensor = sess.graph.get_tensor_by_name(output_layer)
                prediction, = sess.run(prob_tensor, {input_node: [img_input] })
                prediction = prediction.reshape(net_h, net_w)
                prediction = cv2.resize(prediction, (img.shape[1], img.shape[0]), interpolation=cv2.INTER_CUBIC)

                # output
                filename = os.path.join(
                    output_path, os.path.splitext(os.path.basename(img_name))[0]
                )
                utils.write_depth(filename, prediction, bits=2)

        except KeyError:
            # FIX: the original quoting put the variable names inside the
            # string literal instead of interpolating their values.
            print("Couldn't find input node: " + input_node + " or output layer: " + output_layer + ".")
            exit(-1)

    print("finished")


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument('-i', '--input_path',
        default='input',
        help='folder with input images'
    )

    parser.add_argument('-o', '--output_path',
        default='output',
        help='folder for output images'
    )

    parser.add_argument('-m', '--model_weights',
        default='model-f6b98070.pb',
        help='path to the trained weights of model'
    )

    parser.add_argument('-t', '--model_type',
        default='large',
        help='model type: large or small'
    )

    args = parser.parse_args()

    # compute depth maps
    run(args.input_path, args.output_path, args.model_weights, args.model_type)
diff --git a/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/tf/transforms.py b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/tf/transforms.py
new file mode 100644
index 0000000000000000000000000000000000000000..37d68afcebf67defadd7dfb2ff5494f257876575
--- /dev/null
+++ b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/tf/transforms.py
@@ -0,0 +1,234 @@
+import numpy as np
+import cv2
+import math
+
+
def apply_min_size(sample, size, image_interpolation_method=cv2.INTER_AREA):
    """Rezise the sample to ensure the given size. Keeps aspect ratio.

    Args:
        sample (dict): sample with "image", "disparity" and "mask" entries
        size (tuple): minimum (height, width)

    Returns:
        tuple: new size

    NOTE(review): when the sample is already large enough this returns the
    sample dict itself, not a size tuple — callers must handle both.
    """
    shape = list(sample["disparity"].shape)

    # already at least as large as requested -> nothing to do
    if shape[0] >= size[0] and shape[1] >= size[1]:
        return sample

    # uniform scale factor large enough to satisfy both minimums
    scale = [0, 0]
    scale[0] = size[0] / shape[0]
    scale[1] = size[1] / shape[1]

    scale = max(scale)

    shape[0] = math.ceil(scale * shape[0])
    shape[1] = math.ceil(scale * shape[1])

    # resize (cv2.resize expects (width, height), hence shape[::-1])
    sample["image"] = cv2.resize(
        sample["image"], tuple(shape[::-1]), interpolation=image_interpolation_method
    )

    sample["disparity"] = cv2.resize(
        sample["disparity"], tuple(shape[::-1]), interpolation=cv2.INTER_NEAREST
    )
    # nearest-neighbour on a float copy, then back to bool, keeps the mask binary
    sample["mask"] = cv2.resize(
        sample["mask"].astype(np.float32),
        tuple(shape[::-1]),
        interpolation=cv2.INTER_NEAREST,
    )
    sample["mask"] = sample["mask"].astype(bool)

    return tuple(shape)
+
+
class Resize(object):
    """Resize sample to given size (width, height).
    """

    def __init__(
        self,
        width,
        height,
        resize_target=True,
        keep_aspect_ratio=False,
        ensure_multiple_of=1,
        resize_method="lower_bound",
        image_interpolation_method=cv2.INTER_AREA,
    ):
        """Init.

        Args:
            width (int): desired output width
            height (int): desired output height
            resize_target (bool, optional):
                True: Resize the full sample (image, mask, target).
                False: Resize image only.
                Defaults to True.
            keep_aspect_ratio (bool, optional):
                True: Keep the aspect ratio of the input sample.
                Output sample might not have the given width and height, and
                resize behaviour depends on the parameter 'resize_method'.
                Defaults to False.
            ensure_multiple_of (int, optional):
                Output width and height is constrained to be multiple of this parameter.
                Defaults to 1.
            resize_method (str, optional):
                "lower_bound": Output will be at least as large as the given size.
                "upper_bound": Output will be at max as large as the given size. (Output size might be smaller than given size.)
                "minimal": Scale as least as possible. (Output size might be smaller than given size.)
                Defaults to "lower_bound".
        """
        self.__width = width
        self.__height = height

        self.__resize_target = resize_target
        self.__keep_aspect_ratio = keep_aspect_ratio
        self.__multiple_of = ensure_multiple_of
        self.__resize_method = resize_method
        self.__image_interpolation_method = image_interpolation_method

    def constrain_to_multiple_of(self, x, min_val=0, max_val=None):
        """Round x to the nearest multiple of self.__multiple_of, staying
        within [min_val, max_val] by falling back to floor/ceil rounding."""
        y = (np.round(x / self.__multiple_of) * self.__multiple_of).astype(int)

        # rounding overshot the allowed maximum -> round down instead
        if max_val is not None and y > max_val:
            y = (np.floor(x / self.__multiple_of) * self.__multiple_of).astype(int)

        # rounding undershot the required minimum -> round up instead
        if y < min_val:
            y = (np.ceil(x / self.__multiple_of) * self.__multiple_of).astype(int)

        return y

    def get_size(self, width, height):
        """Compute the output (width, height) for an input of the given size."""
        # determine new height and width
        scale_height = self.__height / height
        scale_width = self.__width / width

        if self.__keep_aspect_ratio:
            # use one scale factor for both axes, chosen per resize_method
            if self.__resize_method == "lower_bound":
                # scale such that output size is lower bound
                if scale_width > scale_height:
                    # fit width
                    scale_height = scale_width
                else:
                    # fit height
                    scale_width = scale_height
            elif self.__resize_method == "upper_bound":
                # scale such that output size is upper bound
                if scale_width < scale_height:
                    # fit width
                    scale_height = scale_width
                else:
                    # fit height
                    scale_width = scale_height
            elif self.__resize_method == "minimal":
                # scale as least as possbile
                if abs(1 - scale_width) < abs(1 - scale_height):
                    # fit width
                    scale_height = scale_width
                else:
                    # fit height
                    scale_width = scale_height
            else:
                raise ValueError(
                    f"resize_method {self.__resize_method} not implemented"
                )

        if self.__resize_method == "lower_bound":
            new_height = self.constrain_to_multiple_of(
                scale_height * height, min_val=self.__height
            )
            new_width = self.constrain_to_multiple_of(
                scale_width * width, min_val=self.__width
            )
        elif self.__resize_method == "upper_bound":
            new_height = self.constrain_to_multiple_of(
                scale_height * height, max_val=self.__height
            )
            new_width = self.constrain_to_multiple_of(
                scale_width * width, max_val=self.__width
            )
        elif self.__resize_method == "minimal":
            new_height = self.constrain_to_multiple_of(scale_height * height)
            new_width = self.constrain_to_multiple_of(scale_width * width)
        else:
            raise ValueError(f"resize_method {self.__resize_method} not implemented")

        return (new_width, new_height)

    def __call__(self, sample):
        """Resize sample["image"] (and, when enabled, the targets) in place."""
        width, height = self.get_size(
            sample["image"].shape[1], sample["image"].shape[0]
        )

        # resize sample
        sample["image"] = cv2.resize(
            sample["image"],
            (width, height),
            interpolation=self.__image_interpolation_method,
        )

        if self.__resize_target:
            if "disparity" in sample:
                sample["disparity"] = cv2.resize(
                    sample["disparity"],
                    (width, height),
                    interpolation=cv2.INTER_NEAREST,
                )

            if "depth" in sample:
                sample["depth"] = cv2.resize(
                    sample["depth"], (width, height), interpolation=cv2.INTER_NEAREST
                )

            # nearest-neighbour on a float copy, then back to bool, keeps
            # the mask binary after resizing
            sample["mask"] = cv2.resize(
                sample["mask"].astype(np.float32),
                (width, height),
                interpolation=cv2.INTER_NEAREST,
            )
            sample["mask"] = sample["mask"].astype(bool)

        return sample
+
+
class NormalizeImage(object):
    """Normlize image by given mean and std.
    """

    def __init__(self, mean, std):
        self._mean = mean
        self._std = std

    def __call__(self, sample):
        # standard (x - mean) / std normalization, applied in place on the dict
        image = sample["image"]
        sample["image"] = (image - self._mean) / self._std
        return sample
+
+
class PrepareForNet(object):
    """Prepare sample for usage as network input.
    """

    def __init__(self):
        pass

    def __call__(self, sample):
        # image: HWC -> CHW, contiguous float32
        sample["image"] = np.ascontiguousarray(
            np.transpose(sample["image"], (2, 0, 1))
        ).astype(np.float32)

        # the optional targets only need a contiguous float32 conversion
        for key in ("mask", "disparity", "depth"):
            if key in sample:
                sample[key] = np.ascontiguousarray(sample[key].astype(np.float32))

        return sample
diff --git a/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/tf/utils.py b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/tf/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..cdc1c60e3291ebc1ba57ab0cc4788dcb02fecd76
--- /dev/null
+++ b/RAVE-main/annotator/zoe/zoedepth/models/base_models/midas_repo/tf/utils.py
@@ -0,0 +1,82 @@
+import numpy as np
+import sys
+import cv2
+
+
+def write_pfm(path, image, scale=1):
+ """Write pfm file.
+ Args:
+ path (str): pathto file
+ image (array): data
+ scale (int, optional): Scale. Defaults to 1.
+ """
+
+ with open(path, "wb") as file:
+ color = None
+
+ if image.dtype.name != "float32":
+ raise Exception("Image dtype must be float32.")
+
+ image = np.flipud(image)
+
+ if len(image.shape) == 3 and image.shape[2] == 3: # color image
+ color = True
+ elif (
+ len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1
+ ): # greyscale
+ color = False
+ else:
+ raise Exception("Image must have H x W x 3, H x W x 1 or H x W dimensions.")
+
+ file.write("PF\n" if color else "Pf\n".encode())
+ file.write("%d %d\n".encode() % (image.shape[1], image.shape[0]))
+
+ endian = image.dtype.byteorder
+
+ if endian == "<" or endian == "=" and sys.byteorder == "little":
+ scale = -scale
+
+ file.write("%f\n".encode() % scale)
+
+ image.tofile(file)
+
+def read_image(path):
+ """Read image and output RGB image (0-1).
+ Args:
+ path (str): path to file
+ Returns:
+ array: RGB image (0-1)
+ """
+ img = cv2.imread(path)
+
+ if img.ndim == 2:
+ img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
+
+ img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) / 255.0
+
+ return img
+
+def write_depth(path, depth, bits=1):
+ """Write depth map to pfm and png file.
+ Args:
+ path (str): filepath without extension
+ depth (array): depth
+ """
+ write_pfm(path + ".pfm", depth.astype(np.float32))
+
+ depth_min = depth.min()
+ depth_max = depth.max()
+
+ max_val = (2**(8*bits))-1
+
+ if depth_max - depth_min > np.finfo("float").eps:
+ out = max_val * (depth - depth_min) / (depth_max - depth_min)
+ else:
+ out = 0
+
+ if bits == 1:
+ cv2.imwrite(path + ".png", out.astype("uint8"))
+ elif bits == 2:
+ cv2.imwrite(path + ".png", out.astype("uint16"))
+
+ return
\ No newline at end of file
diff --git a/RAVE-main/annotator/zoe/zoedepth/models/builder.py b/RAVE-main/annotator/zoe/zoedepth/models/builder.py
new file mode 100644
index 0000000000000000000000000000000000000000..a7c6f7bb6fa39af071b4d6ca267b53084b3ee5f6
--- /dev/null
+++ b/RAVE-main/annotator/zoe/zoedepth/models/builder.py
@@ -0,0 +1,51 @@
+# MIT License
+
+# Copyright (c) 2022 Intelligent Systems Lab Org
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# File author: Shariq Farooq Bhat
+
+from importlib import import_module
+from .depth_model import DepthModel
+
+def build_model(config) -> DepthModel:
+ """Builds a model from a config. The model is specified by the model name and version in the config. The model is then constructed using the build_from_config function of the model interface.
+ This function should be used to construct models for training and evaluation.
+
+ Args:
+ config (dict): Config dict. Config is constructed in utils/config.py. Each model has its own config file(s) saved in its root model folder.
+
+ Returns:
+ torch.nn.Module: Model corresponding to name and version as specified in config
+ """
+ module_name = f"zoedepth.models.{config.model}"
+ try:
+ module = import_module(module_name)
+ except ModuleNotFoundError as e:
+ # print the original error message
+ print(e)
+ raise ValueError(
+ f"Model {config.model} not found. Refer above error for details.") from e
+ try:
+ get_version = getattr(module, "get_version")
+ except AttributeError as e:
+ raise ValueError(
+ f"Model {config.model} has no get_version function.") from e
+ return get_version(config.version_name).build_from_config(config)
diff --git a/RAVE-main/annotator/zoe/zoedepth/models/depth_model.py b/RAVE-main/annotator/zoe/zoedepth/models/depth_model.py
new file mode 100644
index 0000000000000000000000000000000000000000..e4301a4f65755a011f5384b59fa4d931cd977948
--- /dev/null
+++ b/RAVE-main/annotator/zoe/zoedepth/models/depth_model.py
@@ -0,0 +1,152 @@
+# MIT License
+
+# Copyright (c) 2022 Intelligent Systems Lab Org
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# File author: Shariq Farooq Bhat
+
+import numpy as np
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from torchvision import transforms
+import PIL.Image
+from PIL import Image
+from typing import Union
+
+
+class DepthModel(nn.Module):
+    """Base class for metric depth estimators.
+
+    Subclasses implement ``forward`` (expected to return a mapping with a
+    'metric_depth' entry — see ``_infer``); this base provides padding and
+    horizontal-flip test-time augmentation plus a PIL convenience wrapper.
+    """
+
+    def __init__(self):
+        super().__init__()
+        # Tracked manually so tensors created in infer_pil land on the same
+        # device the module was last moved to.
+        self.device = 'cpu'
+
+    def to(self, device) -> nn.Module:
+        # Record the target device before delegating to nn.Module.to.
+        self.device = device
+        return super().to(device)
+
+    def forward(self, x, *args, **kwargs):
+        raise NotImplementedError
+
+    def _infer(self, x: torch.Tensor):
+        """
+        Inference interface for the model
+        Args:
+            x (torch.Tensor): input tensor of shape (b, c, h, w)
+        Returns:
+            torch.Tensor: output tensor of shape (b, 1, h, w)
+        """
+        # Subclass forward() must return a mapping with a 'metric_depth' key.
+        return self(x)['metric_depth']
+
+    def _infer_with_pad_aug(self, x: torch.Tensor, pad_input: bool=True, fh: float=3, fw: float=3, upsampling_mode: str='bicubic', padding_mode="reflect", **kwargs) -> torch.Tensor:
+        """
+        Inference interface for the model with padding augmentation
+        Padding augmentation fixes the boundary artifacts in the output depth map.
+        Boundary artifacts are sometimes caused by the fact that the model is trained on NYU raw dataset which has a black or white border around the image.
+        This augmentation pads the input image and crops the prediction back to the original size / view.
+
+        Note: This augmentation is not required for the models trained with 'avoid_boundary'=True.
+        Args:
+            x (torch.Tensor): input tensor of shape (b, c, h, w)
+            pad_input (bool, optional): whether to pad the input or not. Defaults to True.
+            fh (float, optional): height padding factor. The padding is calculated as sqrt(h/2) * fh. Defaults to 3.
+            fw (float, optional): width padding factor. The padding is calculated as sqrt(w/2) * fw. Defaults to 3.
+            upsampling_mode (str, optional): upsampling mode. Defaults to 'bicubic'.
+            padding_mode (str, optional): padding mode. Defaults to "reflect".
+        Returns:
+            torch.Tensor: output tensor of shape (b, 1, h, w)
+        """
+        # assert x is nchw and c = 3
+        assert x.dim() == 4, "x must be 4 dimensional, got {}".format(x.dim())
+        assert x.shape[1] == 3, "x must have 3 channels, got {}".format(x.shape[1])
+
+        if pad_input:
+            assert fh > 0 or fw > 0, "atlease one of fh and fw must be greater than 0"
+            # Pad size grows with sqrt of the spatial extent of each axis.
+            pad_h = int(np.sqrt(x.shape[2]/2) * fh)
+            pad_w = int(np.sqrt(x.shape[3]/2) * fw)
+            # F.pad consumes pairs starting from the LAST dim, so this is
+            # (left, right) and, when pad_h > 0, also (top, bottom).
+            padding = [pad_w, pad_w]
+            if pad_h > 0:
+                padding += [pad_h, pad_h]
+
+            x = F.pad(x, padding, mode=padding_mode, **kwargs)
+        out = self._infer(x)
+        # Some models predict at a lower resolution; match the (padded) input.
+        if out.shape[-2:] != x.shape[-2:]:
+            out = F.interpolate(out, size=(x.shape[2], x.shape[3]), mode=upsampling_mode, align_corners=False)
+        if pad_input:
+            # crop to the original size, handling the case where pad_h and pad_w is 0
+            if pad_h > 0:
+                out = out[:, :, pad_h:-pad_h,:]
+            if pad_w > 0:
+                out = out[:, :, :, pad_w:-pad_w]
+        return out
+
+    def infer_with_flip_aug(self, x, pad_input: bool=True, **kwargs) -> torch.Tensor:
+        """
+        Inference interface for the model with horizontal flip augmentation
+        Horizontal flip augmentation improves the accuracy of the model by averaging the output of the model with and without horizontal flip.
+        Args:
+            x (torch.Tensor): input tensor of shape (b, c, h, w)
+            pad_input (bool, optional): whether to use padding augmentation. Defaults to True.
+        Returns:
+            torch.Tensor: output tensor of shape (b, 1, h, w)
+        """
+        # infer with horizontal flip and average
+        out = self._infer_with_pad_aug(x, pad_input=pad_input, **kwargs)
+        out_flip = self._infer_with_pad_aug(torch.flip(x, dims=[3]), pad_input=pad_input, **kwargs)
+        # Flip the flipped prediction back before averaging the two passes.
+        out = (out + torch.flip(out_flip, dims=[3])) / 2
+        return out
+
+    def infer(self, x, pad_input: bool=True, with_flip_aug: bool=True, **kwargs) -> torch.Tensor:
+        """
+        Inference interface for the model
+        Args:
+            x (torch.Tensor): input tensor of shape (b, c, h, w)
+            pad_input (bool, optional): whether to use padding augmentation. Defaults to True.
+            with_flip_aug (bool, optional): whether to use horizontal flip augmentation. Defaults to True.
+        Returns:
+            torch.Tensor: output tensor of shape (b, 1, h, w)
+        """
+        if with_flip_aug:
+            return self.infer_with_flip_aug(x, pad_input=pad_input, **kwargs)
+        else:
+            return self._infer_with_pad_aug(x, pad_input=pad_input, **kwargs)
+
+    @torch.no_grad()
+    def infer_pil(self, pil_img, pad_input: bool=True, with_flip_aug: bool=True, output_type: str="numpy", **kwargs) -> Union[np.ndarray, PIL.Image.Image, torch.Tensor]:
+        """
+        Inference interface for the model for PIL image
+        Args:
+            pil_img (PIL.Image.Image): input PIL image
+            pad_input (bool, optional): whether to use padding augmentation. Defaults to True.
+            with_flip_aug (bool, optional): whether to use horizontal flip augmentation. Defaults to True.
+            output_type (str, optional): output type. Supported values are 'numpy', 'pil' and 'tensor'. Defaults to "numpy".
+        """
+        # ToTensor scales pixel values to [0, 1]; batch dim added for infer().
+        x = transforms.ToTensor()(pil_img).unsqueeze(0).to(self.device)
+        out_tensor = self.infer(x, pad_input=pad_input, with_flip_aug=with_flip_aug, **kwargs)
+        if output_type == "numpy":
+            return out_tensor.squeeze().cpu().numpy()
+        elif output_type == "pil":
+            # uint16 is required for depth pil image
+            # NOTE(review): the *256 factor presumes depth fits uint16/256 —
+            # values above ~255 will wrap; confirm against expected depth range.
+            out_16bit_numpy = (out_tensor.squeeze().cpu().numpy()*256).astype(np.uint16)
+            return Image.fromarray(out_16bit_numpy)
+        elif output_type == "tensor":
+            return out_tensor.squeeze().cpu()
+        else:
+            raise ValueError(f"output_type {output_type} not supported. Supported values are 'numpy', 'pil' and 'tensor'")
+
\ No newline at end of file
diff --git a/RAVE-main/annotator/zoe/zoedepth/models/layers/__pycache__/attractor.cpython-38.pyc b/RAVE-main/annotator/zoe/zoedepth/models/layers/__pycache__/attractor.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..09c73d2b55e5f052f8ef5285c7a5bed5f794bcea
Binary files /dev/null and b/RAVE-main/annotator/zoe/zoedepth/models/layers/__pycache__/attractor.cpython-38.pyc differ
diff --git a/RAVE-main/annotator/zoe/zoedepth/models/layers/__pycache__/dist_layers.cpython-38.pyc b/RAVE-main/annotator/zoe/zoedepth/models/layers/__pycache__/dist_layers.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d2c171b7f16b88efa9173e17a50c02b3994fc5ff
Binary files /dev/null and b/RAVE-main/annotator/zoe/zoedepth/models/layers/__pycache__/dist_layers.cpython-38.pyc differ
diff --git a/RAVE-main/annotator/zoe/zoedepth/models/layers/__pycache__/localbins_layers.cpython-38.pyc b/RAVE-main/annotator/zoe/zoedepth/models/layers/__pycache__/localbins_layers.cpython-38.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6b02219818c983f7ed3cbf23226be8f9ec6e359f
Binary files /dev/null and b/RAVE-main/annotator/zoe/zoedepth/models/layers/__pycache__/localbins_layers.cpython-38.pyc differ
diff --git a/RAVE-main/annotator/zoe/zoedepth/models/layers/attractor.py b/RAVE-main/annotator/zoe/zoedepth/models/layers/attractor.py
new file mode 100644
index 0000000000000000000000000000000000000000..b5e3473ca6e2271dc28666314cf8f92f52f7e3c6
--- /dev/null
+++ b/RAVE-main/annotator/zoe/zoedepth/models/layers/attractor.py
@@ -0,0 +1,208 @@
+# MIT License
+
+# Copyright (c) 2022 Intelligent Systems Lab Org
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# File author: Shariq Farooq Bhat
+
+import torch
+import torch.nn as nn
+
+
+@torch.jit.script
+def exp_attractor(dx, alpha: float = 300, gamma: int = 2):
+    """Exponential attractor: dc = exp(-alpha*|dx|^gamma) * dx , where dx = a - c, a = attractor point, c = bin center, dc = shift in bin center
+
+    Args:
+        dx (torch.Tensor): The difference tensor dx = Ai - Cj, where Ai is the attractor point and Cj is the bin center.
+        alpha (float, optional): Proportional Attractor strength. Determines the absolute strength. Lower alpha = greater attraction. Defaults to 300.
+        gamma (int, optional): Exponential Attractor strength. Determines the "region of influence" and indirectly number of bin centers affected. Lower gamma = farther reach. Defaults to 2.
+
+    Returns:
+        torch.Tensor : Delta shifts - dc; New bin centers = Old bin centers + dc
+    """
+    return torch.exp(-alpha*(torch.abs(dx)**gamma)) * (dx)
+
+
+@torch.jit.script
+def inv_attractor(dx, alpha: float = 300, gamma: int = 2):
+ """Inverse attractor: dc = dx / (1 + alpha*dx^gamma), where dx = a - c, a = attractor point, c = bin center, dc = shift in bin center
+ This is the default one according to the accompanying paper.
+
+ Args:
+ dx (torch.Tensor): The difference tensor dx = Ai - Cj, where Ai is the attractor point and Cj is the bin center.
+ alpha (float, optional): Proportional Attractor strength. Determines the absolute strength. Lower alpha = greater attraction. Defaults to 300.
+ gamma (int, optional): Exponential Attractor strength. Determines the "region of influence" and indirectly number of bin centers affected. Lower gamma = farther reach. Defaults to 2.
+
+ Returns:
+ torch.Tensor: Delta shifts - dc; New bin centers = Old bin centers + dc
+ """
+ return dx.div(1+alpha*dx.pow(gamma))
+
+
+class AttractorLayer(nn.Module):
+    def __init__(self, in_features, n_bins, n_attractors=16, mlp_dim=128, min_depth=1e-3, max_depth=10,
+                 alpha=300, gamma=2, kind='sum', attractor_type='exp', memory_efficient=False):
+        """
+        Attractor layer for bin centers. Bin centers are bounded on the interval (min_depth, max_depth)
+
+        Args:
+            in_features (int): input feature channels
+            n_bins (int): number of bin centers being adjusted
+            n_attractors (int, optional): number of attractor points predicted per pixel. Defaults to 16.
+            mlp_dim (int, optional): hidden dim of the 1x1-conv MLP. Defaults to 128.
+            min_depth (float, optional): lower depth bound. Defaults to 1e-3.
+            max_depth (float, optional): upper depth bound. Defaults to 10.
+            kind (str, optional): 'sum' or 'mean' reduction over attractors. Defaults to 'sum'.
+            attractor_type (str, optional): 'exp' selects exp_attractor, anything else inv_attractor. Defaults to 'exp'.
+            memory_efficient (bool, optional): loop over attractors instead of broadcasting. Defaults to False.
+        """
+        super().__init__()
+
+        self.n_attractors = n_attractors
+        self.n_bins = n_bins
+        self.min_depth = min_depth
+        self.max_depth = max_depth
+        self.alpha = alpha
+        self.gamma = gamma
+        self.kind = kind
+        self.attractor_type = attractor_type
+        self.memory_efficient = memory_efficient
+
+        self._net = nn.Sequential(
+            nn.Conv2d(in_features, mlp_dim, 1, 1, 0),
+            nn.ReLU(inplace=True),
+            nn.Conv2d(mlp_dim, n_attractors*2, 1, 1, 0), # x2 for linear norm
+            nn.ReLU(inplace=True)
+        )
+
+    def forward(self, x, b_prev, prev_b_embedding=None, interpolate=True, is_for_query=False):
+        """
+        Args:
+            x (torch.Tensor) : feature block; shape - n, c, h, w
+            b_prev (torch.Tensor) : previous bin centers normed; shape - n, prev_nbins, h, w
+
+        Returns:
+            tuple(torch.Tensor,torch.Tensor) : new bin centers normed and scaled; shape - n, nbins, h, w
+        """
+        if prev_b_embedding is not None:
+            if interpolate:
+                # Bring the previous-stage embedding to this feature resolution.
+                prev_b_embedding = nn.functional.interpolate(
+                    prev_b_embedding, x.shape[-2:], mode='bilinear', align_corners=True)
+            x = x + prev_b_embedding
+
+        A = self._net(x)
+        eps = 1e-3
+        # Keep attractor activations strictly positive before normalization.
+        A = A + eps
+        n, c, h, w = A.shape
+        A = A.view(n, self.n_attractors, 2, h, w)
+        # NOTE(review): the normalized form computed on the next line is
+        # immediately overwritten — only the raw first component of each
+        # attractor pair is actually used (kept as-is to match trained weights).
+        A_normed = A / A.sum(dim=2, keepdim=True) # n, a, 2, h, w
+        A_normed = A[:, :, 0, ...] # n, na, h, w
+
+        b_prev = nn.functional.interpolate(
+            b_prev, (h, w), mode='bilinear', align_corners=True)
+        b_centers = b_prev
+
+        if self.attractor_type == 'exp':
+            dist = exp_attractor
+        else:
+            dist = inv_attractor
+
+        if not self.memory_efficient:
+            # Broadcast every attractor against every bin center, then reduce.
+            func = {'mean': torch.mean, 'sum': torch.sum}[self.kind]
+            # .shape N, nbins, h, w
+            delta_c = func(dist(A_normed.unsqueeze(
+                2) - b_centers.unsqueeze(1)), dim=1)
+        else:
+            # One attractor at a time: smaller peak memory, same result.
+            delta_c = torch.zeros_like(b_centers, device=b_centers.device)
+            for i in range(self.n_attractors):
+                # .shape N, nbins, h, w
+                delta_c += dist(A_normed[:, i, ...].unsqueeze(1) - b_centers)
+
+            if self.kind == 'mean':
+                delta_c = delta_c / self.n_attractors
+
+        b_new_centers = b_centers + delta_c
+        # Map normalized centers onto the metric depth range, then enforce
+        # monotonic ordering and the (min_depth, max_depth) bound.
+        B_centers = (self.max_depth - self.min_depth) * \
+            b_new_centers + self.min_depth
+        B_centers, _ = torch.sort(B_centers, dim=1)
+        B_centers = torch.clip(B_centers, self.min_depth, self.max_depth)
+        return b_new_centers, B_centers
+
+
+class AttractorLayerUnnormed(nn.Module):
+ def __init__(self, in_features, n_bins, n_attractors=16, mlp_dim=128, min_depth=1e-3, max_depth=10,
+ alpha=300, gamma=2, kind='sum', attractor_type='exp', memory_efficient=False):
+ """
+ Attractor layer for bin centers. Bin centers are unbounded
+ """
+ super().__init__()
+
+ self.n_attractors = n_attractors
+ self.n_bins = n_bins
+ self.min_depth = min_depth
+ self.max_depth = max_depth
+ self.alpha = alpha
+ self.gamma = gamma
+ self.kind = kind
+ self.attractor_type = attractor_type
+ self.memory_efficient = memory_efficient
+
+ self._net = nn.Sequential(
+ nn.Conv2d(in_features, mlp_dim, 1, 1, 0),
+ nn.ReLU(inplace=True),
+ nn.Conv2d(mlp_dim, n_attractors, 1, 1, 0),
+ nn.Softplus()
+ )
+
+ def forward(self, x, b_prev, prev_b_embedding=None, interpolate=True, is_for_query=False):
+ """
+ Args:
+ x (torch.Tensor) : feature block; shape - n, c, h, w
+ b_prev (torch.Tensor) : previous bin centers normed; shape - n, prev_nbins, h, w
+
+ Returns:
+ tuple(torch.Tensor,torch.Tensor) : new bin centers unbounded; shape - n, nbins, h, w. Two outputs just to keep the API consistent with the normed version
+ """
+ if prev_b_embedding is not None:
+ if interpolate:
+ prev_b_embedding = nn.functional.interpolate(
+ prev_b_embedding, x.shape[-2:], mode='bilinear', align_corners=True)
+ x = x + prev_b_embedding
+
+ A = self._net(x)
+ n, c, h, w = A.shape
+
+ b_prev = nn.functional.interpolate(
+ b_prev, (h, w), mode='bilinear', align_corners=True)
+ b_centers = b_prev
+
+ if self.attractor_type == 'exp':
+ dist = exp_attractor
+ else:
+ dist = inv_attractor
+
+ if not self.memory_efficient:
+ func = {'mean': torch.mean, 'sum': torch.sum}[self.kind]
+ # .shape N, nbins, h, w
+ delta_c = func(
+ dist(A.unsqueeze(2) - b_centers.unsqueeze(1)), dim=1)
+ else:
+ delta_c = torch.zeros_like(b_centers, device=b_centers.device)
+ for i in range(self.n_attractors):
+ delta_c += dist(A[:, i, ...].unsqueeze(1) -
+ b_centers) # .shape N, nbins, h, w
+
+ if self.kind == 'mean':
+ delta_c = delta_c / self.n_attractors
+
+ b_new_centers = b_centers + delta_c
+ B_centers = b_new_centers
+
+ return b_new_centers, B_centers
diff --git a/RAVE-main/annotator/zoe/zoedepth/models/layers/dist_layers.py b/RAVE-main/annotator/zoe/zoedepth/models/layers/dist_layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..d5ff793e94aeb43aa554ef8c2392080df5572e19
--- /dev/null
+++ b/RAVE-main/annotator/zoe/zoedepth/models/layers/dist_layers.py
@@ -0,0 +1,121 @@
+# MIT License
+
+# Copyright (c) 2022 Intelligent Systems Lab Org
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# File author: Shariq Farooq Bhat
+
+import torch
+import torch.nn as nn
+
+
+def log_binom(n, k, eps=1e-7):
+ """ log(nCk) using stirling approximation """
+ n = n + eps
+ k = k + eps
+ return n * torch.log(n) - k * torch.log(k) - (n-k) * torch.log(n-k+eps)
+
+
+class LogBinomial(nn.Module):
+ def __init__(self, n_classes=256, act=torch.softmax):
+ """Compute log binomial distribution for n_classes
+
+ Args:
+ n_classes (int, optional): number of output classes. Defaults to 256.
+ """
+ super().__init__()
+ self.K = n_classes
+ self.act = act
+ self.register_buffer('k_idx', torch.arange(
+ 0, n_classes).view(1, -1, 1, 1))
+ self.register_buffer('K_minus_1', torch.Tensor(
+ [self.K-1]).view(1, -1, 1, 1))
+
+ def forward(self, x, t=1., eps=1e-4):
+ """Compute log binomial distribution for x
+
+ Args:
+ x (torch.Tensor - NCHW): probabilities
+ t (float, torch.Tensor - NCHW, optional): Temperature of distribution. Defaults to 1..
+ eps (float, optional): Small number for numerical stability. Defaults to 1e-4.
+
+ Returns:
+ torch.Tensor -NCHW: log binomial distribution logbinomial(p;t)
+ """
+ if x.ndim == 3:
+ x = x.unsqueeze(1) # make it nchw
+
+ one_minus_x = torch.clamp(1 - x, eps, 1)
+ x = torch.clamp(x, eps, 1)
+ y = log_binom(self.K_minus_1, self.k_idx) + self.k_idx * \
+ torch.log(x) + (self.K - 1 - self.k_idx) * torch.log(one_minus_x)
+ return self.act(y/t, dim=1)
+
+
+class ConditionalLogBinomial(nn.Module):
+ def __init__(self, in_features, condition_dim, n_classes=256, bottleneck_factor=2, p_eps=1e-4, max_temp=50, min_temp=1e-7, act=torch.softmax):
+ """Conditional Log Binomial distribution
+
+ Args:
+ in_features (int): number of input channels in main feature
+ condition_dim (int): number of input channels in condition feature
+ n_classes (int, optional): Number of classes. Defaults to 256.
+ bottleneck_factor (int, optional): Hidden dim factor. Defaults to 2.
+ p_eps (float, optional): small eps value. Defaults to 1e-4.
+ max_temp (float, optional): Maximum temperature of output distribution. Defaults to 50.
+ min_temp (float, optional): Minimum temperature of output distribution. Defaults to 1e-7.
+ """
+ super().__init__()
+ self.p_eps = p_eps
+ self.max_temp = max_temp
+ self.min_temp = min_temp
+ self.log_binomial_transform = LogBinomial(n_classes, act=act)
+ bottleneck = (in_features + condition_dim) // bottleneck_factor
+ self.mlp = nn.Sequential(
+ nn.Conv2d(in_features + condition_dim, bottleneck,
+ kernel_size=1, stride=1, padding=0),
+ nn.GELU(),
+ # 2 for p linear norm, 2 for t linear norm
+ nn.Conv2d(bottleneck, 2+2, kernel_size=1, stride=1, padding=0),
+ nn.Softplus()
+ )
+
+ def forward(self, x, cond):
+ """Forward pass
+
+ Args:
+ x (torch.Tensor - NCHW): Main feature
+ cond (torch.Tensor - NCHW): condition feature
+
+ Returns:
+ torch.Tensor: Output log binomial distribution
+ """
+ pt = self.mlp(torch.concat((x, cond), dim=1))
+ p, t = pt[:, :2, ...], pt[:, 2:, ...]
+
+ p = p + self.p_eps
+ p = p[:, 0, ...] / (p[:, 0, ...] + p[:, 1, ...])
+
+ t = t + self.p_eps
+ t = t[:, 0, ...] / (t[:, 0, ...] + t[:, 1, ...])
+ t = t.unsqueeze(1)
+ t = (self.max_temp - self.min_temp) * t + self.min_temp
+
+ return self.log_binomial_transform(p, t)
diff --git a/RAVE-main/annotator/zoe/zoedepth/models/layers/localbins_layers.py b/RAVE-main/annotator/zoe/zoedepth/models/layers/localbins_layers.py
new file mode 100644
index 0000000000000000000000000000000000000000..cda7b1f9c74ac0d54733ef6d1dcadc1c62ab4647
--- /dev/null
+++ b/RAVE-main/annotator/zoe/zoedepth/models/layers/localbins_layers.py
@@ -0,0 +1,169 @@
+# MIT License
+
+# Copyright (c) 2022 Intelligent Systems Lab Org
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# File author: Shariq Farooq Bhat
+
+import torch
+import torch.nn as nn
+
+
+class SeedBinRegressor(nn.Module):
+ def __init__(self, in_features, n_bins=16, mlp_dim=256, min_depth=1e-3, max_depth=10):
+ """Bin center regressor network. Bin centers are bounded on (min_depth, max_depth) interval.
+
+ Args:
+ in_features (int): input channels
+ n_bins (int, optional): Number of bin centers. Defaults to 16.
+ mlp_dim (int, optional): Hidden dimension. Defaults to 256.
+ min_depth (float, optional): Min depth value. Defaults to 1e-3.
+ max_depth (float, optional): Max depth value. Defaults to 10.
+ """
+ super().__init__()
+ self.version = "1_1"
+ self.min_depth = min_depth
+ self.max_depth = max_depth
+
+ self._net = nn.Sequential(
+ nn.Conv2d(in_features, mlp_dim, 1, 1, 0),
+ nn.ReLU(inplace=True),
+ nn.Conv2d(mlp_dim, n_bins, 1, 1, 0),
+ nn.ReLU(inplace=True)
+ )
+
+ def forward(self, x):
+ """
+ Returns tensor of bin_width vectors (centers). One vector b for every pixel
+ """
+ B = self._net(x)
+ eps = 1e-3
+ B = B + eps
+ B_widths_normed = B / B.sum(dim=1, keepdim=True)
+ B_widths = (self.max_depth - self.min_depth) * \
+ B_widths_normed # .shape NCHW
+ # pad has the form (left, right, top, bottom, front, back)
+ B_widths = nn.functional.pad(
+ B_widths, (0, 0, 0, 0, 1, 0), mode='constant', value=self.min_depth)
+ B_edges = torch.cumsum(B_widths, dim=1) # .shape NCHW
+
+ B_centers = 0.5 * (B_edges[:, :-1, ...] + B_edges[:, 1:, ...])
+ return B_widths_normed, B_centers
+
+
+class SeedBinRegressorUnnormed(nn.Module):
+ def __init__(self, in_features, n_bins=16, mlp_dim=256, min_depth=1e-3, max_depth=10):
+ """Bin center regressor network. Bin centers are unbounded
+
+ Args:
+ in_features (int): input channels
+ n_bins (int, optional): Number of bin centers. Defaults to 16.
+ mlp_dim (int, optional): Hidden dimension. Defaults to 256.
+ min_depth (float, optional): Not used. (for compatibility with SeedBinRegressor)
+ max_depth (float, optional): Not used. (for compatibility with SeedBinRegressor)
+ """
+ super().__init__()
+ self.version = "1_1"
+ self._net = nn.Sequential(
+ nn.Conv2d(in_features, mlp_dim, 1, 1, 0),
+ nn.ReLU(inplace=True),
+ nn.Conv2d(mlp_dim, n_bins, 1, 1, 0),
+ nn.Softplus()
+ )
+
+ def forward(self, x):
+ """
+ Returns tensor of bin_width vectors (centers). One vector b for every pixel
+ """
+ B_centers = self._net(x)
+ return B_centers, B_centers
+
+
+class Projector(nn.Module):
+ def __init__(self, in_features, out_features, mlp_dim=128):
+ """Projector MLP
+
+ Args:
+ in_features (int): input channels
+ out_features (int): output channels
+ mlp_dim (int, optional): hidden dimension. Defaults to 128.
+ """
+ super().__init__()
+
+ self._net = nn.Sequential(
+ nn.Conv2d(in_features, mlp_dim, 1, 1, 0),
+ nn.ReLU(inplace=True),
+ nn.Conv2d(mlp_dim, out_features, 1, 1, 0),
+ )
+
+ def forward(self, x):
+ return self._net(x)
+
+
+
+class LinearSplitter(nn.Module):
+ def __init__(self, in_features, prev_nbins, split_factor=2, mlp_dim=128, min_depth=1e-3, max_depth=10):
+ super().__init__()
+
+ self.prev_nbins = prev_nbins
+ self.split_factor = split_factor
+ self.min_depth = min_depth
+ self.max_depth = max_depth
+
+ self._net = nn.Sequential(
+ nn.Conv2d(in_features, mlp_dim, 1, 1, 0),
+ nn.GELU(),
+ nn.Conv2d(mlp_dim, prev_nbins * split_factor, 1, 1, 0),
+ nn.ReLU()
+ )
+
+ def forward(self, x, b_prev, prev_b_embedding=None, interpolate=True, is_for_query=False):
+ """
+ x : feature block; shape - n, c, h, w
+ b_prev : previous bin widths normed; shape - n, prev_nbins, h, w
+ """
+ if prev_b_embedding is not None:
+ if interpolate:
+ prev_b_embedding = nn.functional.interpolate(prev_b_embedding, x.shape[-2:], mode='bilinear', align_corners=True)
+ x = x + prev_b_embedding
+ S = self._net(x)
+ eps = 1e-3
+ S = S + eps
+ n, c, h, w = S.shape
+ S = S.view(n, self.prev_nbins, self.split_factor, h, w)
+ S_normed = S / S.sum(dim=2, keepdim=True) # fractional splits
+
+ b_prev = nn.functional.interpolate(b_prev, (h,w), mode='bilinear', align_corners=True)
+
+
+ b_prev = b_prev / b_prev.sum(dim=1, keepdim=True) # renormalize for gurantees
+ # print(b_prev.shape, S_normed.shape)
+ # if is_for_query:(1).expand(-1, b_prev.size(0)//n, -1, -1, -1, -1).flatten(0,1) # TODO ? can replace all this with a single torch.repeat?
+ b = b_prev.unsqueeze(2) * S_normed
+ b = b.flatten(1,2) # .shape n, prev_nbins * split_factor, h, w
+
+ # calculate bin centers for loss calculation
+ B_widths = (self.max_depth - self.min_depth) * b # .shape N, nprev * splitfactor, H, W
+ # pad has the form (left, right, top, bottom, front, back)
+ B_widths = nn.functional.pad(B_widths, (0,0,0,0,1,0), mode='constant', value=self.min_depth)
+ B_edges = torch.cumsum(B_widths, dim=1) # .shape NCHW
+
+ B_centers = 0.5 * (B_edges[:, :-1, ...] + B_edges[:,1:,...])
+ return b, B_centers
\ No newline at end of file
diff --git a/RAVE-main/annotator/zoe/zoedepth/models/layers/patch_transformer.py b/RAVE-main/annotator/zoe/zoedepth/models/layers/patch_transformer.py
new file mode 100644
index 0000000000000000000000000000000000000000..eacac4d38809db2b3c669d0134f91645225af531
--- /dev/null
+++ b/RAVE-main/annotator/zoe/zoedepth/models/layers/patch_transformer.py
@@ -0,0 +1,91 @@
+# MIT License
+
+# Copyright (c) 2022 Intelligent Systems Lab Org
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# File author: Shariq Farooq Bhat
+
+import torch
+import torch.nn as nn
+
+
+class PatchTransformerEncoder(nn.Module):
+ def __init__(self, in_channels, patch_size=10, embedding_dim=128, num_heads=4, use_class_token=False):
+ """ViT-like transformer block
+
+ Args:
+ in_channels (int): Input channels
+ patch_size (int, optional): patch size. Defaults to 10.
+ embedding_dim (int, optional): Embedding dimension in transformer model. Defaults to 128.
+ num_heads (int, optional): number of attention heads. Defaults to 4.
+ use_class_token (bool, optional): Whether to use extra token at the start for global accumulation (called as "class token"). Defaults to False.
+ """
+ super(PatchTransformerEncoder, self).__init__()
+ self.use_class_token = use_class_token
+ encoder_layers = nn.TransformerEncoderLayer(
+ embedding_dim, num_heads, dim_feedforward=1024)
+ self.transformer_encoder = nn.TransformerEncoder(
+ encoder_layers, num_layers=4) # takes shape S,N,E
+
+ # non-overlapping patch embedding: a single conv with kernel == stride == patch_size
+ self.embedding_convPxP = nn.Conv2d(in_channels, embedding_dim,
+ kernel_size=patch_size, stride=patch_size, padding=0)
+
+ def positional_encoding_1d(self, sequence_length, batch_size, embedding_dim, device='cpu'):
+ """Generate sinusoidal positional encodings
+
+ Args:
+ sequence_length (int): Sequence length
+ batch_size (int): Batch size; encodings are repeated along the batch dimension
+ embedding_dim (int): Embedding dimension
+ device (optional): Device to create the tensors on. Defaults to 'cpu'.
+
+ Returns:
+ torch.Tensor SBE: Positional encodings
+ """
+ position = torch.arange(
+ 0, sequence_length, dtype=torch.float32, device=device).unsqueeze(1)
+ index = torch.arange(
+ 0, embedding_dim, 2, dtype=torch.float32, device=device).unsqueeze(0)
+ # frequency schedule 10000^(-2i/d), as in the original Transformer encoding
+ div_term = torch.exp(index * (-torch.log(torch.tensor(10000.0, device=device)) / embedding_dim))
+ pos_encoding = position * div_term
+ # first half of the embedding dim gets sin, second half cos
+ pos_encoding = torch.cat([torch.sin(pos_encoding), torch.cos(pos_encoding)], dim=1)
+ pos_encoding = pos_encoding.unsqueeze(1).repeat(1, batch_size, 1)
+ return pos_encoding
+
+
+ def forward(self, x):
+ """Forward pass
+
+ Args:
+ x (torch.Tensor - NCHW): Input feature tensor
+
+ Returns:
+ torch.Tensor - SNE: Transformer output embeddings. S - sequence length (=HW/patch_size^2), N - batch size, E - embedding dim
+ """
+ embeddings = self.embedding_convPxP(x).flatten(
+ 2) # .shape = n,c,s = n, embedding_dim, s
+ if self.use_class_token:
+ # extra special token at start ? (zero-filled slot prepended along the sequence dim)
+ embeddings = nn.functional.pad(embeddings, (1, 0))
+
+ # change to S,N,E format required by transformer
+ embeddings = embeddings.permute(2, 0, 1)
+ S, N, E = embeddings.shape
+ embeddings = embeddings + self.positional_encoding_1d(S, N, E, device=embeddings.device)
+ x = self.transformer_encoder(embeddings) # .shape = S, N, E
+ return x
diff --git a/RAVE-main/annotator/zoe/zoedepth/models/model_io.py b/RAVE-main/annotator/zoe/zoedepth/models/model_io.py
new file mode 100644
index 0000000000000000000000000000000000000000..f63a9a1ff09e98da78a9a3da63e58509471f728d
--- /dev/null
+++ b/RAVE-main/annotator/zoe/zoedepth/models/model_io.py
@@ -0,0 +1,92 @@
+# MIT License
+
+# Copyright (c) 2022 Intelligent Systems Lab Org
+
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# File author: Shariq Farooq Bhat
+
+import torch
+
+def load_state_dict(model, state_dict):
+ """Load state_dict into model, handling DataParallel and DistributedDataParallel. Also checks for "model" key in state_dict.
+
+ DataParallel prefixes state_dict keys with 'module.' when saving.
+ If the model is not a DataParallel model but the state_dict is, then prefixes are removed.
+ If the model is a DataParallel model but the state_dict is not, then prefixes are added.
+
+ Args:
+ model (torch.nn.Module): target model, possibly wrapped in (Distributed)DataParallel
+ state_dict (dict): raw state dict, or a checkpoint dict holding one under the "model" key
+
+ Returns:
+ torch.nn.Module: the same model with weights loaded
+ """
+ # unwrap checkpoint dicts of the form {"model": state_dict, ...}
+ state_dict = state_dict.get('model', state_dict)
+ # if model is a DataParallel model, then state_dict keys are prefixed with 'module.'
+
+ do_prefix = isinstance(
+ model, (torch.nn.DataParallel, torch.nn.parallel.DistributedDataParallel))
+ state = {}
+ for k, v in state_dict.items():
+ if k.startswith('module.') and not do_prefix:
+ # len('module.') == 7: strip the wrapper prefix
+ k = k[7:]
+
+ if not k.startswith('module.') and do_prefix:
+ k = 'module.' + k
+
+ state[k] = v
+
+ model.load_state_dict(state)
+ print("Loaded successfully")
+ return model
+
+
+def load_wts(model, checkpoint_path):
+ """Load weights into `model` from a local checkpoint file (tensors mapped to CPU first)."""
+ ckpt = torch.load(checkpoint_path, map_location='cpu')
+ return load_state_dict(model, ckpt)
+
+
+def load_state_dict_from_url(model, url, **kwargs):
+ """Download (or reuse a cached copy of) a state dict from `url` and load it into `model`.
+
+ Extra keyword arguments are forwarded to torch.hub.load_state_dict_from_url.
+ """
+ state_dict = torch.hub.load_state_dict_from_url(url, map_location='cpu', **kwargs)
+ return load_state_dict(model, state_dict)
+
+
+def load_state_from_resource(model, resource: str):
+ """Loads weights to the model from a given resource. A resource can be of following types:
+ 1. URL. Prefixed with "url::"
+ e.g. url::http(s)://url.resource.com/ckpt.pt
+
+ 2. Local path. Prefixed with "local::"
+ e.g. local::/path/to/ckpt.pt
+
+
+ Args:
+ model (torch.nn.Module): Model
+ resource (str): resource string
+
+ Returns:
+ torch.nn.Module: Model with loaded weights
+
+ Raises:
+ ValueError: if `resource` has neither the "url::" nor the "local::" prefix
+ """
+ print(f"Using pretrained resource {resource}")
+
+ if resource.startswith('url::'):
+ url = resource.split('url::')[1]
+ return load_state_dict_from_url(model, url, progress=True)
+
+ elif resource.startswith('local::'):
+ path = resource.split('local::')[1]
+ return load_wts(model, path)
+
+ else:
+ raise ValueError("Invalid resource type, only url:: and local:: are supported")
+
\ No newline at end of file