Moyao001 committed on
Commit
58b420f
·
verified ·
1 Parent(s): 1abcbdc

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/data/ddad.py +117 -0
  2. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/data/diml_indoor_test.py +125 -0
  3. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/data/diml_outdoor_test.py +114 -0
  4. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/data/diode.py +125 -0
  5. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/data/hypersim.py +138 -0
  6. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/data/ibims.py +81 -0
  7. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/data/preprocess.py +154 -0
  8. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/data/sun_rgbd_loader.py +106 -0
  9. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/data/transforms.py +481 -0
  10. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/data/vkitti.py +151 -0
  11. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/data/vkitti2.py +187 -0
  12. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/__pycache__/__init__.cpython-39.pyc +0 -0
  13. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/__pycache__/depth_model.cpython-39.pyc +0 -0
  14. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/__pycache__/model_io.cpython-39.pyc +0 -0
  15. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/__pycache__/__init__.cpython-39.pyc +0 -0
  16. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/__pycache__/midas.cpython-39.pyc +0 -0
  17. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/levit.py +106 -0
  18. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/next_vit.py +39 -0
  19. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/swin.py +13 -0
  20. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/swin_common.py +52 -0
  21. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/vit.py +221 -0
  22. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/android/app/src/androidTest/assets/fox-mobilenet_v1_1.0_224_support.txt +3 -0
  23. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/android/app/src/androidTest/assets/fox-mobilenet_v1_1.0_224_task_api.txt +3 -0
  24. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/android/app/src/androidTest/java/AndroidManifest.xml +5 -0
  25. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/android/app/src/androidTest/java/org/tensorflow/lite/examples/classification/ClassifierTest.java +121 -0
  26. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/android/app/src/main/res/drawable-v24/ic_launcher_foreground.xml +34 -0
  27. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/android/lib_task_api/src/main/java/org/tensorflow/lite/examples/classification/tflite/Classifier.java +278 -0
  28. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/android/lib_task_api/src/main/java/org/tensorflow/lite/examples/classification/tflite/ClassifierFloatEfficientNet.java +45 -0
  29. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/android/lib_task_api/src/main/java/org/tensorflow/lite/examples/classification/tflite/ClassifierQuantizedEfficientNet.java +43 -0
  30. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/android/lib_task_api/src/main/java/org/tensorflow/lite/examples/classification/tflite/ClassifierQuantizedMobileNet.java +44 -0
  31. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/android/models/build.gradle +40 -0
  32. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/android/models/download.gradle +10 -0
  33. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/android/models/proguard-rules.pro +21 -0
  34. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/android/models/src/main/AndroidManifest.xml +3 -0
  35. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/android/models/src/main/assets/run_tflite.py +75 -0
  36. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/additions/do_catkin_make.sh +5 -0
  37. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/additions/install_ros_melodic_ubuntu_17_18.sh +34 -0
  38. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/midas_cpp/CMakeLists.txt +189 -0
  39. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/midas_cpp/launch/midas_cpp.launch +19 -0
  40. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/midas_cpp/launch/midas_talker_listener.launch +23 -0
  41. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/midas_cpp/package.xml +77 -0
  42. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/midas_cpp/scripts/listener.py +61 -0
  43. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/midas_cpp/scripts/listener_original.py +61 -0
  44. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/midas_cpp/scripts/talker.py +53 -0
  45. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/midas_cpp/src/main.cpp +285 -0
  46. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/tf/README.md +147 -0
  47. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/tf/make_onnx_model.py +112 -0
  48. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/tf/run_pb.py +135 -0
  49. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/tf/transforms.py +234 -0
  50. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/tf/utils.py +82 -0
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/data/ddad.py ADDED
@@ -0,0 +1,117 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # MIT License
2
+
3
+ # Copyright (c) 2022 Intelligent Systems Lab Org
4
+
5
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ # of this software and associated documentation files (the "Software"), to deal
7
+ # in the Software without restriction, including without limitation the rights
8
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ # copies of the Software, and to permit persons to whom the Software is
10
+ # furnished to do so, subject to the following conditions:
11
+
12
+ # The above copyright notice and this permission notice shall be included in all
13
+ # copies or substantial portions of the Software.
14
+
15
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ # SOFTWARE.
22
+
23
+ # File author: Shariq Farooq Bhat
24
+
25
+ import os
26
+
27
+ import numpy as np
28
+ import torch
29
+ from PIL import Image
30
+ from torch.utils.data import DataLoader, Dataset
31
+ from torchvision import transforms
32
+
33
+
34
class ToTensor(object):
    """Convert a {'image', 'depth'} sample to torch tensors for DDAD evaluation.

    The image is resized to ``resize_shape``; the depth map keeps its native
    resolution. Normalization is currently disabled (identity).
    """

    def __init__(self, resize_shape):
        # ImageNet normalization intentionally disabled; kept commented so it
        # can be re-enabled without changing the calling pipeline.
        # self.normalize = transforms.Normalize(
        # mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        self.normalize = lambda x : x
        self.resize = transforms.Resize(resize_shape)

    def __call__(self, sample):
        image, depth = sample['image'], sample['depth']
        image = self.to_tensor(image)
        image = self.normalize(image)
        depth = self.to_tensor(depth)

        # Only the image is resized; depth stays at its original resolution.
        image = self.resize(image)

        return {'image': image, 'depth': depth, 'dataset': "ddad"}

    def to_tensor(self, pic):
        """Convert a numpy HWC array or PIL image to a CHW tensor (no value scaling)."""
        if isinstance(pic, np.ndarray):
            # Zero-copy HWC -> CHW conversion.
            img = torch.from_numpy(pic.transpose((2, 0, 1)))
            return img

        # # handle PIL Image
        if pic.mode == 'I':
            img = torch.from_numpy(np.array(pic, np.int32, copy=False))
        elif pic.mode == 'I;16':
            img = torch.from_numpy(np.array(pic, np.int16, copy=False))
        else:
            img = torch.ByteTensor(
                torch.ByteStorage.from_buffer(pic.tobytes()))
        # PIL image mode: 1, L, P, I, F, RGB, YCbCr, RGBA, CMYK
        if pic.mode == 'YCbCr':
            nchannel = 3
        elif pic.mode == 'I;16':
            nchannel = 1
        else:
            nchannel = len(pic.mode)
        img = img.view(pic.size[1], pic.size[0], nchannel)

        # HWC -> CHW, matching torch's channel-first convention.
        img = img.transpose(0, 1).transpose(0, 2).contiguous()

        if isinstance(img, torch.ByteTensor):
            return img.float()
        else:
            return img
+
81
+
82
class DDAD(Dataset):
    """DDAD evaluation dataset: a flat folder of ``*_rgb.png`` images with
    matching ``*_depth.npy`` depth maps in meters."""

    def __init__(self, data_dir_root, resize_shape):
        import glob

        # RGB/depth pairs live flat in data_dir_root as *_rgb.png / *_depth.npy.
        self.image_files = glob.glob(os.path.join(data_dir_root, '*.png'))
        # Depth paths are derived from the RGB paths by filename convention.
        self.depth_files = [r.replace("_rgb.png", "_depth.npy")
                            for r in self.image_files]
        self.transform = ToTensor(resize_shape)

    def __getitem__(self, idx):

        image_path = self.image_files[idx]
        depth_path = self.depth_files[idx]

        # Image scaled to [0, 1]; depth kept in meters.
        image = np.asarray(Image.open(image_path), dtype=np.float32) / 255.0
        depth = np.load(depth_path)  # meters

        # depth[depth > 8] = -1
        depth = depth[..., None]  # add trailing channel dim -> (H, W, 1)

        sample = dict(image=image, depth=depth)
        sample = self.transform(sample)

        if idx == 0:
            # Debug aid: print the image tensor shape once.
            print(sample["image"].shape)

        return sample

    def __len__(self):
        return len(self.image_files)
+
114
+
115
def get_ddad_loader(data_dir_root, resize_shape, batch_size=1, **kwargs):
    """Create a DataLoader over DDAD samples rooted at ``data_dir_root``."""
    ddad_dataset = DDAD(data_dir_root, resize_shape)
    return DataLoader(ddad_dataset, batch_size, **kwargs)
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/data/diml_indoor_test.py ADDED
@@ -0,0 +1,125 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # MIT License
2
+
3
+ # Copyright (c) 2022 Intelligent Systems Lab Org
4
+
5
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ # of this software and associated documentation files (the "Software"), to deal
7
+ # in the Software without restriction, including without limitation the rights
8
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ # copies of the Software, and to permit persons to whom the Software is
10
+ # furnished to do so, subject to the following conditions:
11
+
12
+ # The above copyright notice and this permission notice shall be included in all
13
+ # copies or substantial portions of the Software.
14
+
15
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ # SOFTWARE.
22
+
23
+ # File author: Shariq Farooq Bhat
24
+
25
+ import os
26
+
27
+ import numpy as np
28
+ import torch
29
+ from PIL import Image
30
+ from torch.utils.data import DataLoader, Dataset
31
+ from torchvision import transforms
32
+
33
+
34
class ToTensor(object):
    """Convert a {'image', 'depth'} sample to torch tensors for DIML-indoor.

    The image is resized to 480x640; the depth map keeps its native
    resolution. Normalization is currently disabled (identity).
    """

    def __init__(self):
        # ImageNet normalization intentionally disabled (identity).
        # self.normalize = transforms.Normalize(
        # mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        self.normalize = lambda x : x
        self.resize = transforms.Resize((480, 640))

    def __call__(self, sample):
        image, depth = sample['image'], sample['depth']
        image = self.to_tensor(image)
        image = self.normalize(image)
        depth = self.to_tensor(depth)

        # Only the image is resized; depth stays at its original resolution.
        image = self.resize(image)

        return {'image': image, 'depth': depth, 'dataset': "diml_indoor"}

    def to_tensor(self, pic):
        """Convert a numpy HWC array or PIL image to a CHW tensor (no value scaling)."""
        if isinstance(pic, np.ndarray):
            # Zero-copy HWC -> CHW conversion.
            img = torch.from_numpy(pic.transpose((2, 0, 1)))
            return img

        # # handle PIL Image
        if pic.mode == 'I':
            img = torch.from_numpy(np.array(pic, np.int32, copy=False))
        elif pic.mode == 'I;16':
            img = torch.from_numpy(np.array(pic, np.int16, copy=False))
        else:
            img = torch.ByteTensor(
                torch.ByteStorage.from_buffer(pic.tobytes()))
        # PIL image mode: 1, L, P, I, F, RGB, YCbCr, RGBA, CMYK
        if pic.mode == 'YCbCr':
            nchannel = 3
        elif pic.mode == 'I;16':
            nchannel = 1
        else:
            nchannel = len(pic.mode)
        img = img.view(pic.size[1], pic.size[0], nchannel)

        # HWC -> CHW, matching torch's channel-first convention.
        img = img.transpose(0, 1).transpose(0, 2).contiguous()
        if isinstance(img, torch.ByteTensor):
            return img.float()
        else:
            return img
79
+
80
+
81
class DIML_Indoor(Dataset):
    """DIML indoor test split: color pngs paired with filled depth pngs (mm)."""

    def __init__(self, data_dir_root):
        import glob

        # image paths are of the form <data_dir_root>/{HR, LR}/<scene>/{color, depth_filled}/*.png
        # NOTE(review): only the "LR" subtree is globbed here — confirm that
        # HR is intentionally excluded.
        self.image_files = glob.glob(os.path.join(
            data_dir_root, "LR", '*', 'color', '*.png'))
        # Depth paths derived from image paths by directory/suffix convention.
        self.depth_files = [r.replace("color", "depth_filled").replace(
            "_c.png", "_depth_filled.png") for r in self.image_files]
        self.transform = ToTensor()

    def __getitem__(self, idx):
        image_path = self.image_files[idx]
        depth_path = self.depth_files[idx]

        # Image scaled to [0, 1]; depth converted from millimeters to meters.
        image = np.asarray(Image.open(image_path), dtype=np.float32) / 255.0
        depth = np.asarray(Image.open(depth_path),
                           dtype='uint16') / 1000.0  # mm to meters

        # print(np.shape(image))
        # print(np.shape(depth))

        # depth[depth > 8] = -1
        depth = depth[..., None]  # add trailing channel dim -> (H, W, 1)

        sample = dict(image=image, depth=depth)

        # return sample
        sample = self.transform(sample)

        if idx == 0:
            # Debug aid: print the image tensor shape once.
            print(sample["image"].shape)

        return sample

    def __len__(self):
        return len(self.image_files)
118
+
119
+
120
def get_diml_indoor_loader(data_dir_root, batch_size=1, **kwargs):
    """Create a DataLoader over the DIML indoor test split."""
    diml_dataset = DIML_Indoor(data_dir_root)
    return DataLoader(diml_dataset, batch_size, **kwargs)
123
+
124
+ # get_diml_indoor_loader(data_dir_root="datasets/diml/indoor/test/HR")
125
+ # get_diml_indoor_loader(data_dir_root="datasets/diml/indoor/test/LR")
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/data/diml_outdoor_test.py ADDED
@@ -0,0 +1,114 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # MIT License
2
+
3
+ # Copyright (c) 2022 Intelligent Systems Lab Org
4
+
5
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ # of this software and associated documentation files (the "Software"), to deal
7
+ # in the Software without restriction, including without limitation the rights
8
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ # copies of the Software, and to permit persons to whom the Software is
10
+ # furnished to do so, subject to the following conditions:
11
+
12
+ # The above copyright notice and this permission notice shall be included in all
13
+ # copies or substantial portions of the Software.
14
+
15
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ # SOFTWARE.
22
+
23
+ # File author: Shariq Farooq Bhat
24
+
25
+ import os
26
+
27
+ import numpy as np
28
+ import torch
29
+ from PIL import Image
30
+ from torch.utils.data import DataLoader, Dataset
31
+ from torchvision import transforms
32
+
33
+
34
class ToTensor(object):
    """Convert a {'image', 'depth'} sample to torch tensors for DIML-outdoor.

    Unlike the other dataset variants, no resizing is applied here.
    Normalization is currently disabled (identity).
    """

    def __init__(self):
        # ImageNet normalization intentionally disabled (identity).
        # self.normalize = transforms.Normalize(
        # mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        self.normalize = lambda x : x

    def __call__(self, sample):
        image, depth = sample['image'], sample['depth']
        image = self.to_tensor(image)
        image = self.normalize(image)
        depth = self.to_tensor(depth)

        return {'image': image, 'depth': depth, 'dataset': "diml_outdoor"}

    def to_tensor(self, pic):
        """Convert a numpy HWC array or PIL image to a CHW tensor (no value scaling)."""
        if isinstance(pic, np.ndarray):
            # Zero-copy HWC -> CHW conversion.
            img = torch.from_numpy(pic.transpose((2, 0, 1)))
            return img

        # # handle PIL Image
        if pic.mode == 'I':
            img = torch.from_numpy(np.array(pic, np.int32, copy=False))
        elif pic.mode == 'I;16':
            img = torch.from_numpy(np.array(pic, np.int16, copy=False))
        else:
            img = torch.ByteTensor(
                torch.ByteStorage.from_buffer(pic.tobytes()))
        # PIL image mode: 1, L, P, I, F, RGB, YCbCr, RGBA, CMYK
        if pic.mode == 'YCbCr':
            nchannel = 3
        elif pic.mode == 'I;16':
            nchannel = 1
        else:
            nchannel = len(pic.mode)
        img = img.view(pic.size[1], pic.size[0], nchannel)

        # HWC -> CHW, matching torch's channel-first convention.
        img = img.transpose(0, 1).transpose(0, 2).contiguous()
        if isinstance(img, torch.ByteTensor):
            return img.float()
        else:
            return img
76
+
77
+
78
class DIML_Outdoor(Dataset):
    """DIML outdoor test split: left pngs paired with uint16 depth pngs (mm)."""

    def __init__(self, data_dir_root):
        import glob

        # image paths are of the form <data_dir_root>/{outleft, depthmap}/*.png
        self.image_files = glob.glob(os.path.join(
            data_dir_root, "*", 'outleft', '*.png'))
        # Depth paths derived from image paths by directory-name convention.
        self.depth_files = [r.replace("outleft", "depthmap")
                            for r in self.image_files]
        self.transform = ToTensor()

    def __getitem__(self, idx):
        image_path = self.image_files[idx]
        depth_path = self.depth_files[idx]

        # Image scaled to [0, 1]; depth converted from millimeters to meters.
        image = np.asarray(Image.open(image_path), dtype=np.float32) / 255.0
        depth = np.asarray(Image.open(depth_path),
                           dtype='uint16') / 1000.0  # mm to meters

        # depth[depth > 8] = -1
        depth = depth[..., None]  # add trailing channel dim -> (H, W, 1)

        # NOTE(review): the 'dataset' key here is redundant — the ToTensor
        # transform rebuilds the dict with its own 'dataset' entry.
        sample = dict(image=image, depth=depth, dataset="diml_outdoor")

        # return sample
        return self.transform(sample)

    def __len__(self):
        return len(self.image_files)
107
+
108
+
109
def get_diml_outdoor_loader(data_dir_root, batch_size=1, **kwargs):
    """Create a DataLoader over the DIML outdoor test split."""
    diml_dataset = DIML_Outdoor(data_dir_root)
    return DataLoader(diml_dataset, batch_size, **kwargs)
112
+
113
+ # get_diml_outdoor_loader(data_dir_root="datasets/diml/outdoor/test/HR")
114
+ # get_diml_outdoor_loader(data_dir_root="datasets/diml/outdoor/test/LR")
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/data/diode.py ADDED
@@ -0,0 +1,125 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # MIT License
2
+
3
+ # Copyright (c) 2022 Intelligent Systems Lab Org
4
+
5
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ # of this software and associated documentation files (the "Software"), to deal
7
+ # in the Software without restriction, including without limitation the rights
8
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ # copies of the Software, and to permit persons to whom the Software is
10
+ # furnished to do so, subject to the following conditions:
11
+
12
+ # The above copyright notice and this permission notice shall be included in all
13
+ # copies or substantial portions of the Software.
14
+
15
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ # SOFTWARE.
22
+
23
+ # File author: Shariq Farooq Bhat
24
+
25
+ import os
26
+
27
+ import numpy as np
28
+ import torch
29
+ from PIL import Image
30
+ from torch.utils.data import DataLoader, Dataset
31
+ from torchvision import transforms
32
+
33
+
34
class ToTensor(object):
    """Convert a {'image', 'depth'} sample to torch tensors for DIODE.

    The image's shorter side is resized to 480 (aspect preserved); the depth
    map keeps its native resolution. Normalization is currently disabled.
    """

    def __init__(self):
        # ImageNet normalization intentionally disabled (identity).
        # self.normalize = transforms.Normalize(
        # mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        self.normalize = lambda x : x
        self.resize = transforms.Resize(480)

    def __call__(self, sample):
        image, depth = sample['image'], sample['depth']
        image = self.to_tensor(image)
        image = self.normalize(image)
        depth = self.to_tensor(depth)

        # Only the image is resized; depth stays at its original resolution.
        image = self.resize(image)

        return {'image': image, 'depth': depth, 'dataset': "diode"}

    def to_tensor(self, pic):
        """Convert a numpy HWC array or PIL image to a CHW tensor (no value scaling)."""
        if isinstance(pic, np.ndarray):
            # Zero-copy HWC -> CHW conversion.
            img = torch.from_numpy(pic.transpose((2, 0, 1)))
            return img

        # # handle PIL Image
        if pic.mode == 'I':
            img = torch.from_numpy(np.array(pic, np.int32, copy=False))
        elif pic.mode == 'I;16':
            img = torch.from_numpy(np.array(pic, np.int16, copy=False))
        else:
            img = torch.ByteTensor(
                torch.ByteStorage.from_buffer(pic.tobytes()))
        # PIL image mode: 1, L, P, I, F, RGB, YCbCr, RGBA, CMYK
        if pic.mode == 'YCbCr':
            nchannel = 3
        elif pic.mode == 'I;16':
            nchannel = 1
        else:
            nchannel = len(pic.mode)
        img = img.view(pic.size[1], pic.size[0], nchannel)

        # HWC -> CHW, matching torch's channel-first convention.
        img = img.transpose(0, 1).transpose(0, 2).contiguous()

        if isinstance(img, torch.ByteTensor):
            return img.float()
        else:
            return img
80
+
81
+
82
class DIODE(Dataset):
    """DIODE split: pngs with per-image ``*_depth.npy`` (meters) and
    ``*_depth_mask.npy`` validity masks."""

    def __init__(self, data_dir_root):
        import glob

        # image paths are of the form <data_dir_root>/scene_#/scan_#/*.png
        self.image_files = glob.glob(
            os.path.join(data_dir_root, '*', '*', '*.png'))
        # Depth and mask paths are derived by filename convention.
        self.depth_files = [r.replace(".png", "_depth.npy")
                            for r in self.image_files]
        self.depth_mask_files = [
            r.replace(".png", "_depth_mask.npy") for r in self.image_files]
        self.transform = ToTensor()

    def __getitem__(self, idx):
        image_path = self.image_files[idx]
        depth_path = self.depth_files[idx]
        depth_mask_path = self.depth_mask_files[idx]

        # Image scaled to [0, 1]; depth kept in meters.
        image = np.asarray(Image.open(image_path), dtype=np.float32) / 255.0
        depth = np.load(depth_path)  # in meters
        valid = np.load(depth_mask_path)  # binary

        # depth[depth > 8] = -1
        # depth = depth[..., None]

        # NOTE(review): 'valid' is put in the sample but the ToTensor transform
        # returns only image/depth/dataset, so the mask is dropped — confirm
        # this is intended.
        sample = dict(image=image, depth=depth, valid=valid)

        # return sample
        sample = self.transform(sample)

        if idx == 0:
            # Debug aid: print the image tensor shape once.
            print(sample["image"].shape)

        return sample

    def __len__(self):
        return len(self.image_files)
119
+
120
+
121
def get_diode_loader(data_dir_root, batch_size=1, **kwargs):
    """Create a DataLoader over the DIODE split rooted at ``data_dir_root``."""
    diode_dataset = DIODE(data_dir_root)
    return DataLoader(diode_dataset, batch_size, **kwargs)
124
+
125
+ # get_diode_loader(data_dir_root="datasets/diode/val/outdoor")
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/data/hypersim.py ADDED
@@ -0,0 +1,138 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # MIT License
2
+
3
+ # Copyright (c) 2022 Intelligent Systems Lab Org
4
+
5
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ # of this software and associated documentation files (the "Software"), to deal
7
+ # in the Software without restriction, including without limitation the rights
8
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ # copies of the Software, and to permit persons to whom the Software is
10
+ # furnished to do so, subject to the following conditions:
11
+
12
+ # The above copyright notice and this permission notice shall be included in all
13
+ # copies or substantial portions of the Software.
14
+
15
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ # SOFTWARE.
22
+
23
+ # File author: Shariq Farooq Bhat
24
+
25
+ import glob
26
+ import os
27
+
28
+ import h5py
29
+ import numpy as np
30
+ import torch
31
+ from PIL import Image
32
+ from torch.utils.data import DataLoader, Dataset
33
+ from torchvision import transforms
34
+
35
+
36
def hypersim_distance_to_depth(npyDistance):
    """Convert Hypersim ray (Euclidean) distance to planar depth, in meters.

    Uses Hypersim's fixed camera intrinsics (1024x768, focal 886.81 px):
    depth = distance / ||ray|| * focal, where ray = (x, y, focal) is the
    per-pixel vector onto the image plane.
    """
    width, height, focal = 1024, 768, 886.81

    # Pixel-center coordinates on the image plane, as (H, W, 1) float32 grids.
    plane_x = np.linspace((-0.5 * width) + 0.5, (0.5 * width) - 0.5, width).reshape(
        1, width).repeat(height, 0).astype(np.float32)[:, :, None]
    plane_y = np.linspace((-0.5 * height) + 0.5, (0.5 * height) - 0.5,
                          height).reshape(height, 1).repeat(width, 1).astype(np.float32)[:, :, None]
    plane_z = np.full([height, width, 1], focal, np.float32)
    image_plane = np.concatenate([plane_x, plane_y, plane_z], 2)

    # Scale ray distance down by per-pixel ray length to get planar (z) depth.
    return npyDistance / np.linalg.norm(image_plane, 2, 2) * focal
49
+
50
+
51
class ToTensor(object):
    """Convert a {'image', 'depth'} sample to torch tensors for Hypersim.

    The image is resized to 480x640; the depth map keeps its native
    resolution. Normalization is currently disabled (identity).
    """

    def __init__(self):
        # ImageNet normalization intentionally disabled (identity).
        # self.normalize = transforms.Normalize(
        # mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        self.normalize = lambda x: x
        self.resize = transforms.Resize((480, 640))

    def __call__(self, sample):
        image, depth = sample['image'], sample['depth']
        image = self.to_tensor(image)
        image = self.normalize(image)
        depth = self.to_tensor(depth)

        # Only the image is resized; depth stays at its original resolution.
        image = self.resize(image)

        return {'image': image, 'depth': depth, 'dataset': "hypersim"}

    def to_tensor(self, pic):
        """Convert a numpy HWC array or PIL image to a CHW tensor (no value scaling)."""
        if isinstance(pic, np.ndarray):
            # Zero-copy HWC -> CHW conversion.
            img = torch.from_numpy(pic.transpose((2, 0, 1)))
            return img

        # # handle PIL Image
        if pic.mode == 'I':
            img = torch.from_numpy(np.array(pic, np.int32, copy=False))
        elif pic.mode == 'I;16':
            img = torch.from_numpy(np.array(pic, np.int16, copy=False))
        else:
            img = torch.ByteTensor(
                torch.ByteStorage.from_buffer(pic.tobytes()))
        # PIL image mode: 1, L, P, I, F, RGB, YCbCr, RGBA, CMYK
        if pic.mode == 'YCbCr':
            nchannel = 3
        elif pic.mode == 'I;16':
            nchannel = 1
        else:
            nchannel = len(pic.mode)
        img = img.view(pic.size[1], pic.size[0], nchannel)

        # HWC -> CHW, matching torch's channel-first convention.
        img = img.transpose(0, 1).transpose(0, 2).contiguous()
        if isinstance(img, torch.ByteTensor):
            return img.float()
        else:
            return img
96
+
97
+
98
class HyperSim(Dataset):
    """Hypersim dataset: tonemapped jpg previews with HDF5 distance maps that
    are converted to planar depth in meters."""

    def __init__(self, data_dir_root):
        # image paths are of the form <data_dir_root>/<scene>/images/scene_cam_#_final_preview/*.tonemap.jpg
        # depth paths are of the form <data_dir_root>/<scene>/images/scene_cam_#_final_preview/*.depth_meters.hdf5
        self.image_files = glob.glob(os.path.join(
            data_dir_root, '*', 'images', 'scene_cam_*_final_preview', '*.tonemap.jpg'))
        # Depth paths derived from image paths by directory/suffix convention.
        self.depth_files = [r.replace("_final_preview", "_geometry_hdf5").replace(
            ".tonemap.jpg", ".depth_meters.hdf5") for r in self.image_files]
        self.transform = ToTensor()

    def __getitem__(self, idx):
        image_path = self.image_files[idx]
        depth_path = self.depth_files[idx]

        # Image scaled to [0, 1].
        image = np.asarray(Image.open(image_path), dtype=np.float32) / 255.0

        # depth from hdf5
        depth_fd = h5py.File(depth_path, "r")
        # in meters (Euclidean distance)
        distance_meters = np.array(depth_fd['dataset'])
        # Hypersim stores ray distance; convert to conventional planar depth.
        depth = hypersim_distance_to_depth(
            distance_meters)  # in meters (planar depth)

        # depth[depth > 8] = -1
        depth = depth[..., None]  # add trailing channel dim -> (H, W, 1)

        sample = dict(image=image, depth=depth)
        sample = self.transform(sample)

        if idx == 0:
            # Debug aid: print the image tensor shape once.
            print(sample["image"].shape)

        return sample

    def __len__(self):
        return len(self.image_files)
134
+
135
+
136
def get_hypersim_loader(data_dir_root, batch_size=1, **kwargs):
    """Create a DataLoader over Hypersim samples rooted at ``data_dir_root``."""
    hypersim_dataset = HyperSim(data_dir_root)
    return DataLoader(hypersim_dataset, batch_size, **kwargs)
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/data/ibims.py ADDED
@@ -0,0 +1,81 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # MIT License
2
+
3
+ # Copyright (c) 2022 Intelligent Systems Lab Org
4
+
5
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ # of this software and associated documentation files (the "Software"), to deal
7
+ # in the Software without restriction, including without limitation the rights
8
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ # copies of the Software, and to permit persons to whom the Software is
10
+ # furnished to do so, subject to the following conditions:
11
+
12
+ # The above copyright notice and this permission notice shall be included in all
13
+ # copies or substantial portions of the Software.
14
+
15
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ # SOFTWARE.
22
+
23
+ # File author: Shariq Farooq Bhat
24
+
25
+ import os
26
+
27
+ import numpy as np
28
+ import torch
29
+ from PIL import Image
30
+ from torch.utils.data import DataLoader, Dataset
31
+ from torchvision import transforms as T
32
+
33
+
34
class iBims(Dataset):
    """iBims-1 benchmark dataset.

    Reads the image list from ``<ibims_root>/imagelist.txt`` and, per sample,
    loads RGB, depth, and the invalid/transparency masks. Pixels outside the
    valid masks get depth -1.
    """

    def __init__(self, config):
        root_folder = config.ibims_root
        with open(os.path.join(root_folder, "imagelist.txt"), 'r') as f:
            imglist = f.read().split()

        samples = []
        for basename in imglist:
            # Each sample is a 4-tuple of paths built from the shared basename.
            img_path = os.path.join(root_folder, 'rgb', basename + ".png")
            depth_path = os.path.join(root_folder, 'depth', basename + ".png")
            valid_mask_path = os.path.join(
                root_folder, 'mask_invalid', basename+".png")
            transp_mask_path = os.path.join(
                root_folder, 'mask_transp', basename+".png")

            samples.append(
                (img_path, depth_path, valid_mask_path, transp_mask_path))

        self.samples = samples
        # ImageNet normalization intentionally disabled (identity).
        # self.normalize = T.Normalize(
        # mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        self.normalize = lambda x : x

    def __getitem__(self, idx):
        img_path, depth_path, valid_mask_path, transp_mask_path = self.samples[idx]

        img = np.asarray(Image.open(img_path), dtype=np.float32) / 255.0
        # uint16 depth png encodes [0, 50] m over the full 16-bit range.
        depth = np.asarray(Image.open(depth_path),
                           dtype=np.uint16).astype('float')*50.0/65535

        mask_valid = np.asarray(Image.open(valid_mask_path))
        mask_transp = np.asarray(Image.open(transp_mask_path))

        # depth = depth * mask_valid * mask_transp
        # Mark pixels that are invalid or behind transparent surfaces with -1.
        depth = np.where(mask_valid * mask_transp, depth, -1)

        img = torch.from_numpy(img).permute(2, 0, 1)  # HWC -> CHW
        img = self.normalize(img)
        depth = torch.from_numpy(depth).unsqueeze(0)  # add channel dim
        return dict(image=img, depth=depth, image_path=img_path, depth_path=depth_path, dataset='ibims')

    def __len__(self):
        return len(self.samples)
77
+
78
+
79
def get_ibims_loader(config, batch_size=1, **kwargs):
    """Create a DataLoader over the iBims-1 benchmark described by ``config``."""
    dataset = iBims(config)
    return DataLoader(dataset, batch_size=batch_size, **kwargs)
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/data/preprocess.py ADDED
@@ -0,0 +1,154 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # MIT License
2
+
3
+ # Copyright (c) 2022 Intelligent Systems Lab Org
4
+
5
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ # of this software and associated documentation files (the "Software"), to deal
7
+ # in the Software without restriction, including without limitation the rights
8
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ # copies of the Software, and to permit persons to whom the Software is
10
+ # furnished to do so, subject to the following conditions:
11
+
12
+ # The above copyright notice and this permission notice shall be included in all
13
+ # copies or substantial portions of the Software.
14
+
15
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ # SOFTWARE.
22
+
23
+ # File author: Shariq Farooq Bhat
24
+
25
+ import numpy as np
26
+ from dataclasses import dataclass
27
+ from typing import Tuple, List
28
+
29
# dataclass to store the crop parameters
@dataclass
class CropParams:
    # Bounds of the retained region: image[top:bottom, left:right].
    top: int
    bottom: int
    left: int
    right: int


def get_border_params(rgb_image, tolerance=0.1, cut_off=20, value=0,
                      level_diff_threshold=5, channel_axis=-1, min_border=5) -> CropParams:
    """Scan inward from each edge to find crop bounds removing a near-`value` border.

    A row/column is treated as border if more than `tolerance` of its pixels lie
    within `level_diff_threshold` of `value`; scanning stops after `cut_off` pixels.
    """
    gray_image = np.mean(rgb_image, axis=channel_axis)
    h, w = gray_image.shape

    def exceeds_tolerance(line, total_pixels):
        # Fraction of pixels close to the border value, compared to tolerance.
        n_value = np.sum(np.abs(line - value) < level_diff_threshold)
        return (n_value / total_pixels) > tolerance

    # Crop top border until number of value pixels become below tolerance
    top = min_border
    while exceeds_tolerance(gray_image[top, :], w) and top < h - 1:
        top += 1
        if top > cut_off:
            break

    # Crop bottom border until number of value pixels become below tolerance
    bottom = h - min_border
    while exceeds_tolerance(gray_image[bottom, :], w) and bottom > 0:
        bottom -= 1
        if h - bottom > cut_off:
            break

    # Crop left border until number of value pixels become below tolerance
    left = min_border
    while exceeds_tolerance(gray_image[:, left], h) and left < w - 1:
        left += 1
        if left > cut_off:
            break

    # Crop right border until number of value pixels become below tolerance
    right = w - min_border
    while exceeds_tolerance(gray_image[:, right], h) and right > 0:
        right -= 1
        if w - right > cut_off:
            break

    return CropParams(top, bottom, left, right)
80
+
81
+
82
def get_white_border(rgb_image, value=255, **kwargs) -> CropParams:
    """Crops the white border of the RGB.

    Args:
        rgb_image: RGB image, shape (H, W, 3).
        value: pixel value treated as "white" (255 or 1).

    Returns:
        Crop parameters.
    """
    if value == 255:
        # Sanity-check the image really is in [0, 255] (and not already [0, 1]).
        assert np.min(rgb_image) >= 0 and np.max(rgb_image) <= 255, "RGB image values are not in range [0, 255]."
        assert rgb_image.max() > 1, "RGB image values are not in range [0, 255]."
    elif value == 1:
        # assert range of values in rgb image is [0, 1]
        assert np.min(rgb_image) >= 0 and np.max(rgb_image) <= 1, "RGB image values are not in range [0, 1]."

    return get_border_params(rgb_image, value=value, **kwargs)
99
+
100
def get_black_border(rgb_image, **kwargs) -> CropParams:
    """Crops the black border of the RGB.

    Args:
        rgb_image: RGB image, shape (H, W, 3).

    Returns:
        Crop parameters.
    """
    # A black border is simply a value-0 border.
    return get_border_params(rgb_image, value=0, **kwargs)
111
+
112
def crop_image(image: np.ndarray, crop_params: CropParams) -> np.ndarray:
    """Return the sub-image selected by the crop parameters.

    Args:
        image: RGB or depth image, shape (H, W, 3) or (H, W).
        crop_params: Crop parameters.

    Returns:
        Cropped image (a view into the input).
    """
    return image[crop_params.top:crop_params.bottom,
                 crop_params.left:crop_params.right]


def crop_images(*images: np.ndarray, crop_params: CropParams) -> Tuple[np.ndarray]:
    """Apply the same crop to several images.

    Args:
        images: RGB or depth images, shape (H, W, 3) or (H, W).
        crop_params: Crop parameters.

    Returns:
        Tuple of cropped images, in the order given.
    """
    return tuple(crop_image(im, crop_params) for im in images)
135
+
136
def crop_black_or_white_border(rgb_image, *other_images: np.ndarray, tolerance=0.1, cut_off=20, level_diff_threshold=5) -> Tuple[np.ndarray]:
    """Crops the white and black border of the RGB and depth images.

    Args:
        rgb_image: RGB image, shape (H, W, 3). This image is used to determine the border.
        other_images: The other images to crop according to the border of the RGB image.
    Returns:
        Cropped RGB and other images.
    """
    # First remove a black border, then a white one on the already-cropped result.
    black_params = get_black_border(
        rgb_image, tolerance=tolerance, cut_off=cut_off,
        level_diff_threshold=level_diff_threshold)
    cropped = crop_images(rgb_image, *other_images, crop_params=black_params)

    white_params = get_white_border(
        cropped[0], tolerance=tolerance, cut_off=cut_off,
        level_diff_threshold=level_diff_threshold)
    return crop_images(*cropped, crop_params=white_params)
154
+
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/data/sun_rgbd_loader.py ADDED
@@ -0,0 +1,106 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # MIT License
2
+
3
+ # Copyright (c) 2022 Intelligent Systems Lab Org
4
+
5
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ # of this software and associated documentation files (the "Software"), to deal
7
+ # in the Software without restriction, including without limitation the rights
8
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ # copies of the Software, and to permit persons to whom the Software is
10
+ # furnished to do so, subject to the following conditions:
11
+
12
+ # The above copyright notice and this permission notice shall be included in all
13
+ # copies or substantial portions of the Software.
14
+
15
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ # SOFTWARE.
22
+
23
+ # File author: Shariq Farooq Bhat
24
+
25
+ import os
26
+
27
+ import numpy as np
28
+ import torch
29
+ from PIL import Image
30
+ from torch.utils.data import DataLoader, Dataset
31
+ from torchvision import transforms
32
+
33
+
34
class ToTensor(object):
    """Convert a {'image', 'depth'} sample of numpy/PIL data to CHW torch tensors."""

    def __init__(self):
        # Normalization is intentionally the identity (ImageNet statistics are
        # kept as a commented-out reference in the original source).
        self.normalize = lambda x: x

    def __call__(self, sample):
        image = self.normalize(self.to_tensor(sample['image']))
        depth = self.to_tensor(sample['depth'])
        return {'image': image, 'depth': depth, 'dataset': "sunrgbd"}

    def to_tensor(self, pic):
        """Convert an HWC numpy array or a PIL image to a CHW torch tensor."""
        if isinstance(pic, np.ndarray):
            return torch.from_numpy(pic.transpose((2, 0, 1)))

        # PIL fallback: decode according to the image mode.
        if pic.mode == 'I':
            img = torch.from_numpy(np.array(pic, np.int32, copy=False))
        elif pic.mode == 'I;16':
            img = torch.from_numpy(np.array(pic, np.int16, copy=False))
        else:
            img = torch.ByteTensor(
                torch.ByteStorage.from_buffer(pic.tobytes()))
        # PIL image mode: 1, L, P, I, F, RGB, YCbCr, RGBA, CMYK
        if pic.mode == 'YCbCr':
            nchannel = 3
        elif pic.mode == 'I;16':
            nchannel = 1
        else:
            nchannel = len(pic.mode)
        img = img.view(pic.size[1], pic.size[0], nchannel)

        # HWC -> CHW.
        img = img.transpose(0, 1).transpose(0, 2).contiguous()
        return img.float() if isinstance(img, torch.ByteTensor) else img
76
+
77
+
78
class SunRGBD(Dataset):
    """SUN RGB-D evaluation set read from <root>/rgb/rgb and <root>/gt/gt."""

    def __init__(self, data_dir_root):
        # The historical .mat train/test split loading was dropped; the folder
        # layout below is assumed to already contain only the test images.
        import glob
        self.image_files = glob.glob(
            os.path.join(data_dir_root, 'rgb', 'rgb', '*'))
        # Ground-truth depth mirrors the RGB tree with png extensions.
        self.depth_files = [
            p.replace("rgb/rgb", "gt/gt").replace("jpg", "png")
            for p in self.image_files
        ]
        self.transform = ToTensor()

    def __getitem__(self, idx):
        image = np.asarray(Image.open(self.image_files[idx]),
                           dtype=np.float32) / 255.0
        # Depth is divided by 1000 (presumably millimetres -> metres — TODO confirm);
        # values beyond 8 are flagged invalid with -1.
        depth = np.asarray(Image.open(self.depth_files[idx]),
                           dtype='uint16') / 1000.0
        depth[depth > 8] = -1
        depth = depth[..., None]
        return self.transform(dict(image=image, depth=depth))

    def __len__(self):
        return len(self.image_files)
102
+
103
+
104
def get_sunrgbd_loader(data_dir_root, batch_size=1, **kwargs):
    """Build a DataLoader over the SUN RGB-D evaluation folder."""
    return DataLoader(SunRGBD(data_dir_root), batch_size, **kwargs)
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/data/transforms.py ADDED
@@ -0,0 +1,481 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # MIT License
2
+
3
+ # Copyright (c) 2022 Intelligent Systems Lab Org
4
+
5
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ # of this software and associated documentation files (the "Software"), to deal
7
+ # in the Software without restriction, including without limitation the rights
8
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ # copies of the Software, and to permit persons to whom the Software is
10
+ # furnished to do so, subject to the following conditions:
11
+
12
+ # The above copyright notice and this permission notice shall be included in all
13
+ # copies or substantial portions of the Software.
14
+
15
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ # SOFTWARE.
22
+
23
+ # File author: Shariq Farooq Bhat
24
+
25
+ import math
26
+ import random
27
+
28
+ import cv2
29
+ import numpy as np
30
+
31
+
32
class RandomFliplr(object):
    """Horizontal flip of the sample with given probability."""

    def __init__(self, probability=0.5):
        """Init.

        Args:
            probability (float, optional): Flip probability. Defaults to 0.5.
        """
        self.__probability = probability

    def __call__(self, sample):
        if random.random() >= self.__probability:
            return sample

        # Flip every array-valued entry left-right; non-array entries untouched.
        for key, val in sample.items():
            if len(val.shape) >= 2:
                sample[key] = np.fliplr(val).copy()

        return sample
53
+
54
+
55
def apply_min_size(sample, size, image_interpolation_method=cv2.INTER_AREA):
    """Rezise the sample to ensure the given size. Keeps aspect ratio.

    Scales "image", "disparity" and "mask" in place so that both spatial
    dimensions are at least `size`, using a single scale factor.

    Args:
        sample (dict): sample with "image", "disparity" and "mask" arrays
        size (tuple): minimum (height, width)

    Returns:
        tuple: new (height, width) after resizing.
        NOTE(review): when no resize is needed this returns `sample` (a dict),
        not a size tuple — the only caller (RandomCrop) invokes it exclusively
        when a resize is required, but the inconsistent return type is worth
        confirming before reuse.
    """
    shape = list(sample["disparity"].shape)

    # Already large enough on both axes: nothing to do.
    if shape[0] >= size[0] and shape[1] >= size[1]:
        return sample

    # One scale factor (the larger ratio) so both sides reach the minimum.
    scale = [0, 0]
    scale[0] = size[0] / shape[0]
    scale[1] = size[1] / shape[1]

    scale = max(scale)

    shape[0] = math.ceil(scale * shape[0])
    shape[1] = math.ceil(scale * shape[1])

    # resize — cv2.resize takes (width, height), hence shape[::-1]
    sample["image"] = cv2.resize(
        sample["image"], tuple(shape[::-1]), interpolation=image_interpolation_method
    )

    # Disparity and mask use nearest-neighbour so values are not blended.
    sample["disparity"] = cv2.resize(
        sample["disparity"], tuple(shape[::-1]), interpolation=cv2.INTER_NEAREST
    )
    sample["mask"] = cv2.resize(
        sample["mask"].astype(np.float32),
        tuple(shape[::-1]),
        interpolation=cv2.INTER_NEAREST,
    )
    # Float round-trip keeps the mask binary; restore bool dtype.
    sample["mask"] = sample["mask"].astype(bool)

    return tuple(shape)
95
+
96
+
97
class RandomCrop(object):
    """Get a random crop of the sample with the given size (width, height)."""

    def __init__(
        self,
        width,
        height,
        resize_if_needed=False,
        image_interpolation_method=cv2.INTER_AREA,
    ):
        """Init.

        Args:
            width (int): output width
            height (int): output height
            resize_if_needed (bool, optional): If True, sample might be upsampled to ensure
                that a crop of size (width, height) is possbile. Defaults to False.
        """
        self.__size = (height, width)
        self.__resize_if_needed = resize_if_needed
        self.__image_interpolation_method = image_interpolation_method

    def __call__(self, sample):
        shape = sample["disparity"].shape

        # Upsample first (or fail) when the sample is smaller than the crop.
        if self.__size[0] > shape[0] or self.__size[1] > shape[1]:
            if not self.__resize_if_needed:
                raise Exception(
                    "Output size {} bigger than input size {}.".format(
                        self.__size, shape
                    )
                )
            shape = apply_min_size(
                sample, self.__size, self.__image_interpolation_method
            )

        # Uniform-random top-left corner such that the crop fits.
        offset = (
            np.random.randint(shape[0] - self.__size[0] + 1),
            np.random.randint(shape[1] - self.__size[1] + 1),
        )

        for key, val in sample.items():
            # "code"/"basis" entries are not spatial and must not be cropped.
            if key in ("code", "basis"):
                continue

            if len(sample[key].shape) >= 2:
                sample[key] = val[
                    offset[0]: offset[0] + self.__size[0],
                    offset[1]: offset[1] + self.__size[1],
                ]

        return sample
152
+
153
+
154
class Resize(object):
    """Resize sample to given size (width, height).

    Resizes sample["image"] and, when `resize_target` is set, also
    "disparity", "depth" and "mask"; can additionally pad to a letter box.
    """

    def __init__(
        self,
        width,
        height,
        resize_target=True,
        keep_aspect_ratio=False,
        ensure_multiple_of=1,
        resize_method="lower_bound",
        image_interpolation_method=cv2.INTER_AREA,
        letter_box=False,
    ):
        """Init.

        Args:
            width (int): desired output width
            height (int): desired output height
            resize_target (bool, optional):
                True: Resize the full sample (image, mask, target).
                False: Resize image only.
                Defaults to True.
            keep_aspect_ratio (bool, optional):
                True: Keep the aspect ratio of the input sample.
                Output sample might not have the given width and height, and
                resize behaviour depends on the parameter 'resize_method'.
                Defaults to False.
            ensure_multiple_of (int, optional):
                Output width and height is constrained to be multiple of this parameter.
                Defaults to 1.
            resize_method (str, optional):
                "lower_bound": Output will be at least as large as the given size.
                "upper_bound": Output will be at max as large as the given size. (Output size might be smaller than given size.)
                "minimal": Scale as least as possible. (Output size might be smaller than given size.)
                Defaults to "lower_bound".
            image_interpolation_method (int, optional):
                cv2 interpolation flag used for the image. Defaults to cv2.INTER_AREA.
            letter_box (bool, optional):
                True: after resizing, zero-pad each array symmetrically up to
                (height, width). Defaults to False.
        """
        self.__width = width
        self.__height = height

        self.__resize_target = resize_target
        self.__keep_aspect_ratio = keep_aspect_ratio
        self.__multiple_of = ensure_multiple_of
        self.__resize_method = resize_method
        self.__image_interpolation_method = image_interpolation_method
        self.__letter_box = letter_box

    def constrain_to_multiple_of(self, x, min_val=0, max_val=None):
        # Round to the nearest multiple; fall back to floor/ceil so the
        # result respects max_val / min_val bounds when given.
        y = (np.round(x / self.__multiple_of) * self.__multiple_of).astype(int)

        if max_val is not None and y > max_val:
            y = (np.floor(x / self.__multiple_of)
                 * self.__multiple_of).astype(int)

        if y < min_val:
            y = (np.ceil(x / self.__multiple_of)
                 * self.__multiple_of).astype(int)

        return y

    def get_size(self, width, height):
        """Return (new_width, new_height) for an input of the given size."""
        # determine new height and width
        scale_height = self.__height / height
        scale_width = self.__width / width

        if self.__keep_aspect_ratio:
            if self.__resize_method == "lower_bound":
                # scale such that output size is lower bound
                if scale_width > scale_height:
                    # fit width
                    scale_height = scale_width
                else:
                    # fit height
                    scale_width = scale_height
            elif self.__resize_method == "upper_bound":
                # scale such that output size is upper bound
                if scale_width < scale_height:
                    # fit width
                    scale_height = scale_width
                else:
                    # fit height
                    scale_width = scale_height
            elif self.__resize_method == "minimal":
                # scale as least as possbile
                if abs(1 - scale_width) < abs(1 - scale_height):
                    # fit width
                    scale_height = scale_width
                else:
                    # fit height
                    scale_width = scale_height
            else:
                raise ValueError(
                    f"resize_method {self.__resize_method} not implemented"
                )

        if self.__resize_method == "lower_bound":
            new_height = self.constrain_to_multiple_of(
                scale_height * height, min_val=self.__height
            )
            new_width = self.constrain_to_multiple_of(
                scale_width * width, min_val=self.__width
            )
        elif self.__resize_method == "upper_bound":
            new_height = self.constrain_to_multiple_of(
                scale_height * height, max_val=self.__height
            )
            new_width = self.constrain_to_multiple_of(
                scale_width * width, max_val=self.__width
            )
        elif self.__resize_method == "minimal":
            new_height = self.constrain_to_multiple_of(scale_height * height)
            new_width = self.constrain_to_multiple_of(scale_width * width)
        else:
            raise ValueError(
                f"resize_method {self.__resize_method} not implemented")

        return (new_width, new_height)

    def make_letter_box(self, sample):
        # Zero-pad symmetrically so the array reaches (height, width).
        top = bottom = (self.__height - sample.shape[0]) // 2
        left = right = (self.__width - sample.shape[1]) // 2
        sample = cv2.copyMakeBorder(
            sample, top, bottom, left, right, cv2.BORDER_CONSTANT, None, 0)
        return sample

    def __call__(self, sample):
        width, height = self.get_size(
            sample["image"].shape[1], sample["image"].shape[0]
        )

        # resize sample
        sample["image"] = cv2.resize(
            sample["image"],
            (width, height),
            interpolation=self.__image_interpolation_method,
        )

        if self.__letter_box:
            sample["image"] = self.make_letter_box(sample["image"])

        if self.__resize_target:
            # Targets use nearest-neighbour so values are never blended.
            if "disparity" in sample:
                sample["disparity"] = cv2.resize(
                    sample["disparity"],
                    (width, height),
                    interpolation=cv2.INTER_NEAREST,
                )

                if self.__letter_box:
                    sample["disparity"] = self.make_letter_box(
                        sample["disparity"])

            if "depth" in sample:
                sample["depth"] = cv2.resize(
                    sample["depth"], (width,
                                      height), interpolation=cv2.INTER_NEAREST
                )

                if self.__letter_box:
                    sample["depth"] = self.make_letter_box(sample["depth"])

            # Float round-trip keeps the mask binary; restore bool after.
            sample["mask"] = cv2.resize(
                sample["mask"].astype(np.float32),
                (width, height),
                interpolation=cv2.INTER_NEAREST,
            )

            if self.__letter_box:
                sample["mask"] = self.make_letter_box(sample["mask"])

            sample["mask"] = sample["mask"].astype(bool)

        return sample
328
+
329
+
330
class ResizeFixed(object):
    """Resize image/disparity/mask to a fixed (height, width)."""

    def __init__(self, size):
        self.__size = size  # (height, width)

    def __call__(self, sample):
        # cv2.resize expects (width, height), hence the reversal.
        target = self.__size[::-1]

        sample["image"] = cv2.resize(
            sample["image"], target, interpolation=cv2.INTER_LINEAR)

        # Disparity and mask use nearest-neighbour so values are not blended.
        sample["disparity"] = cv2.resize(
            sample["disparity"], target, interpolation=cv2.INTER_NEAREST)

        sample["mask"] = cv2.resize(
            sample["mask"].astype(np.float32),
            target,
            interpolation=cv2.INTER_NEAREST,
        )
        sample["mask"] = sample["mask"].astype(bool)

        return sample
352
+
353
+
354
class Rescale(object):
    """Rescale target values to the interval [0, max_val].
    If input is constant, values are set to max_val / 2.
    """

    def __init__(self, max_val=1.0, use_mask=True):
        """Init.

        Args:
            max_val (float, optional): Max output value. Defaults to 1.0.
            use_mask (bool, optional): Only operate on valid pixels (mask == True). Defaults to True.
        """
        self.__max_val = max_val
        self.__use_mask = use_mask

    def __call__(self, sample):
        disp = sample["disparity"]

        if self.__use_mask:
            mask = sample["mask"]
        else:
            # BUGFIX: the original used dtype=np.bool, an alias removed in
            # NumPy 1.24 — this branch raised AttributeError on modern NumPy.
            mask = np.ones_like(disp, dtype=bool)

        # Nothing valid to rescale.
        if np.sum(mask) == 0:
            return sample

        min_val = np.min(disp[mask])
        max_val = np.max(disp[mask])

        if max_val > min_val:
            # Affine map of the valid pixels onto [0, max_val].
            sample["disparity"][mask] = (
                (disp[mask] - min_val) / (max_val - min_val) * self.__max_val
            )
        else:
            # Constant input: place everything at the midpoint.
            sample["disparity"][mask] = np.ones_like(
                disp[mask]) * self.__max_val / 2.0

        return sample
392
+
393
+
394
+ # mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
395
+ class NormalizeImage(object):
396
+ """Normlize image by given mean and std.
397
+ """
398
+
399
+ def __init__(self, mean, std):
400
+ self.__mean = mean
401
+ self.__std = std
402
+
403
+ def __call__(self, sample):
404
+ sample["image"] = (sample["image"] - self.__mean) / self.__std
405
+
406
+ return sample
407
+
408
+
409
class DepthToDisparity(object):
    """Convert depth to disparity (1 / depth). Removes depth from sample."""

    def __init__(self, eps=1e-4):
        # Depths below eps cannot be inverted safely and are masked out.
        self.__eps = eps

    def __call__(self, sample):
        assert "depth" in sample

        sample["mask"][sample["depth"] < self.__eps] = False

        valid = sample["depth"] >= self.__eps
        disparity = np.zeros_like(sample["depth"])
        disparity[valid] = 1.0 / sample["depth"][valid]
        sample["disparity"] = disparity

        del sample["depth"]

        return sample
429
+
430
+
431
class DisparityToDepth(object):
    """Convert disparity to depth (1 / |disparity|). Removes disparity from sample."""

    def __init__(self, eps=1e-4):
        # Disparities below eps cannot be inverted safely and are masked out.
        self.__eps = eps

    def __call__(self, sample):
        assert "disparity" in sample

        disp = np.abs(sample["disparity"])
        sample["mask"][disp < self.__eps] = False

        valid = disp >= self.__eps
        depth = np.zeros_like(disp)
        depth[valid] = 1.0 / disp[valid]
        sample["depth"] = depth

        del sample["disparity"]

        return sample
456
+
457
+
458
class PrepareForNet(object):
    """Prepare sample for usage as network input (contiguous float32, image in CHW)."""

    def __init__(self):
        pass

    def __call__(self, sample):
        # HWC -> CHW for the image.
        chw = np.transpose(sample["image"], (2, 0, 1))
        sample["image"] = np.ascontiguousarray(chw).astype(np.float32)

        # Optional targets become contiguous float32 without reshaping.
        for key in ("mask", "disparity", "depth"):
            if key in sample:
                sample[key] = np.ascontiguousarray(
                    sample[key].astype(np.float32))

        return sample
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/data/vkitti.py ADDED
@@ -0,0 +1,151 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # MIT License
2
+
3
+ # Copyright (c) 2022 Intelligent Systems Lab Org
4
+
5
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ # of this software and associated documentation files (the "Software"), to deal
7
+ # in the Software without restriction, including without limitation the rights
8
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ # copies of the Software, and to permit persons to whom the Software is
10
+ # furnished to do so, subject to the following conditions:
11
+
12
+ # The above copyright notice and this permission notice shall be included in all
13
+ # copies or substantial portions of the Software.
14
+
15
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ # SOFTWARE.
22
+
23
+ # File author: Shariq Farooq Bhat
24
+
25
+ import torch
26
+ from torch.utils.data import Dataset, DataLoader
27
+ from torchvision import transforms
28
+ import os
29
+
30
+ from PIL import Image
31
+ import numpy as np
32
+ import cv2
33
+
34
+
35
class ToTensor(object):
    """Convert a {'image', 'depth'} sample to CHW tensors; image gets ImageNet normalization."""

    def __init__(self):
        self.normalize = transforms.Normalize(
            mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

    def __call__(self, sample):
        image = self.normalize(self.to_tensor(sample['image']))
        depth = self.to_tensor(sample['depth'])
        return {'image': image, 'depth': depth, 'dataset': "vkitti"}

    def to_tensor(self, pic):
        """Convert an HWC numpy array or a PIL image to a CHW torch tensor."""
        if isinstance(pic, np.ndarray):
            return torch.from_numpy(pic.transpose((2, 0, 1)))

        # PIL fallback: decode according to the image mode.
        if pic.mode == 'I':
            img = torch.from_numpy(np.array(pic, np.int32, copy=False))
        elif pic.mode == 'I;16':
            img = torch.from_numpy(np.array(pic, np.int16, copy=False))
        else:
            img = torch.ByteTensor(
                torch.ByteStorage.from_buffer(pic.tobytes()))
        # PIL image mode: 1, L, P, I, F, RGB, YCbCr, RGBA, CMYK
        if pic.mode == 'YCbCr':
            nchannel = 3
        elif pic.mode == 'I;16':
            nchannel = 1
        else:
            nchannel = len(pic.mode)
        img = img.view(pic.size[1], pic.size[0], nchannel)

        # HWC -> CHW.
        img = img.transpose(0, 1).transpose(0, 2).contiguous()
        return img.float() if isinstance(img, torch.ByteTensor) else img
80
+
81
+
82
class VKITTI(Dataset):
    """Virtual KITTI test split: RGB from test_color/, depth from test_depth/."""

    def __init__(self, data_dir_root, do_kb_crop=True):
        import glob
        # image paths are of the form <data_dir_root>/test_color/*.png
        self.image_files = glob.glob(os.path.join(
            data_dir_root, "test_color", '*.png'))
        self.depth_files = [p.replace("test_color", "test_depth")
                            for p in self.image_files]
        # BUGFIX: was hard-coded to True, silently ignoring the constructor argument.
        self.do_kb_crop = do_kb_crop
        self.transform = ToTensor()

    def __getitem__(self, idx):
        image_path = self.image_files[idx]
        depth_path = self.depth_files[idx]

        image = Image.open(image_path)
        # Depth is read via cv2 so 16-bit PNGs keep their full value range.
        # (A redundant PIL open of the same file was removed.)
        depth = cv2.imread(depth_path, cv2.IMREAD_ANYCOLOR |
                           cv2.IMREAD_ANYDEPTH)

        # NOTE(review): the KITTI-benchmark crop below is deliberately disabled
        # ("and False" in the original); enabling it as-is would fail because
        # `depth` is a cv2 ndarray, not a PIL image, and has no .crop method.
        if self.do_kb_crop and False:
            height = image.height
            width = image.width
            top_margin = int(height - 352)
            left_margin = int((width - 1216) / 2)
            depth = depth.crop(
                (left_margin, top_margin, left_margin + 1216, top_margin + 352))
            image = image.crop(
                (left_margin, top_margin, left_margin + 1216, top_margin + 352))

        image = np.asarray(image, dtype=np.float32) / 255.0
        depth = depth[..., None]  # add a channel axis for ToTensor
        return self.transform(dict(image=image, depth=depth))

    def __len__(self):
        return len(self.image_files)
134
+
135
+
136
def get_vkitti_loader(data_dir_root, batch_size=1, **kwargs):
    """Build a DataLoader over the Virtual KITTI test folder."""
    return DataLoader(VKITTI(data_dir_root), batch_size, **kwargs)
139
+
140
+
141
if __name__ == "__main__":
    # Quick smoke test: iterate a few samples and report shapes and depth range.
    loader = get_vkitti_loader(
        data_dir_root="/home/bhatsf/shortcuts/datasets/vkitti_test")
    print("Total files", len(loader.dataset))
    for i, sample in enumerate(loader):
        print(sample["image"].shape)
        print(sample["depth"].shape)
        print(sample["dataset"])
        print(sample['depth'].min(), sample['depth'].max())
        if i > 5:
            break
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/data/vkitti2.py ADDED
@@ -0,0 +1,187 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # MIT License
2
+
3
+ # Copyright (c) 2022 Intelligent Systems Lab Org
4
+
5
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ # of this software and associated documentation files (the "Software"), to deal
7
+ # in the Software without restriction, including without limitation the rights
8
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ # copies of the Software, and to permit persons to whom the Software is
10
+ # furnished to do so, subject to the following conditions:
11
+
12
+ # The above copyright notice and this permission notice shall be included in all
13
+ # copies or substantial portions of the Software.
14
+
15
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ # SOFTWARE.
22
+
23
+ # File author: Shariq Farooq Bhat
24
+
25
+ import os
26
+
27
+ import cv2
28
+ import numpy as np
29
+ import torch
30
+ from PIL import Image
31
+ from torch.utils.data import DataLoader, Dataset
32
+ from torchvision import transforms
33
+
34
+
35
class ToTensor(object):
    """Convert a ``{'image', 'depth'}`` sample of arrays/PIL images to tensors.

    Normalization is currently the identity (the ImageNet mean/std transform is
    intentionally disabled); the output dict is tagged with dataset "vkitti".
    """

    def __init__(self):
        # Identity stand-in for the (disabled) ImageNet normalization.
        self.normalize = lambda x: x

    def __call__(self, sample):
        img = self.normalize(self.to_tensor(sample['image']))
        dep = self.to_tensor(sample['depth'])
        return {'image': img, 'depth': dep, 'dataset': "vkitti"}

    def to_tensor(self, pic):
        """Convert a HWC numpy array or a PIL image to a CHW torch tensor."""
        # Fast path: numpy array, dtype preserved.
        if isinstance(pic, np.ndarray):
            return torch.from_numpy(pic.transpose((2, 0, 1)))

        # PIL image: decode according to mode.
        if pic.mode == 'I':
            img = torch.from_numpy(np.array(pic, np.int32, copy=False))
        elif pic.mode == 'I;16':
            img = torch.from_numpy(np.array(pic, np.int16, copy=False))
        else:
            img = torch.ByteTensor(
                torch.ByteStorage.from_buffer(pic.tobytes()))
        # PIL image mode: 1, L, P, I, F, RGB, YCbCr, RGBA, CMYK
        if pic.mode == 'YCbCr':
            nchannel = 3
        elif pic.mode == 'I;16':
            nchannel = 1
        else:
            nchannel = len(pic.mode)
        img = img.view(pic.size[1], pic.size[0], nchannel)

        img = img.transpose(0, 1).transpose(0, 2).contiguous()
        return img.float() if isinstance(img, torch.ByteTensor) else img
81
+
82
+
83
class VKITTI2(Dataset):
    """Virtual KITTI 2 depth dataset.

    Images live under ``<root>/rgb/<scene>/<variant>/frames/rgb/Camera_0/rgb_*.jpg``
    with matching depth maps in the parallel ``.../depth/.../depth_*.png``
    tree (16-bit PNGs storing centimetres).
    """

    def __init__(self, data_dir_root, do_kb_crop=True, split="test"):
        import glob

        # image paths are of the form <data_dir_root>/rgb/<scene>/<variant>/frames/<rgb,depth>/Camera<0,1>/rgb_{}.jpg
        self.image_files = glob.glob(os.path.join(
            data_dir_root, "rgb", "**", "frames", "rgb", "Camera_0", '*.jpg'), recursive=True)
        self.depth_files = self._depth_paths(self.image_files)
        # BUGFIX: honor the do_kb_crop argument — it was previously hard-coded
        # to True, silently ignoring the caller's choice.
        self.do_kb_crop = do_kb_crop
        self.transform = ToTensor()

        # If train test split is not created, then create one.
        # Split is such that 8% of the frames from each scene are used for testing.
        if not os.path.exists(os.path.join(data_dir_root, "train.txt")):
            import random
            scenes = set([os.path.basename(os.path.dirname(
                os.path.dirname(os.path.dirname(f)))) for f in self.image_files])
            train_files = []
            test_files = []
            for scene in scenes:
                scene_files = [f for f in self.image_files if os.path.basename(
                    os.path.dirname(os.path.dirname(os.path.dirname(f)))) == scene]
                random.shuffle(scene_files)
                train_files.extend(scene_files[:int(len(scene_files) * 0.92)])
                test_files.extend(scene_files[int(len(scene_files) * 0.92):])
            with open(os.path.join(data_dir_root, "train.txt"), "w") as f:
                f.write("\n".join(train_files))
            with open(os.path.join(data_dir_root, "test.txt"), "w") as f:
                f.write("\n".join(test_files))

        # Restrict the file lists to the requested split.
        if split in ("train", "test"):
            with open(os.path.join(data_dir_root, split + ".txt"), "r") as f:
                self.image_files = f.read().splitlines()
            self.depth_files = self._depth_paths(self.image_files)

    @staticmethod
    def _depth_paths(image_files):
        # Map each RGB frame path to its sibling 16-bit depth PNG.
        return [r.replace("/rgb/", "/depth/").replace(
            "rgb_", "depth_").replace(".jpg", ".png") for r in image_files]

    def __getitem__(self, idx):
        """Return a transformed sample dict for frame *idx*.

        Depth is decoded with cv2 (lossless 16-bit read), converted from
        centimetres to metres, and values beyond 80 m are marked invalid (-1).
        """
        image_path = self.image_files[idx]
        depth_path = self.depth_files[idx]

        image = Image.open(image_path)
        depth = cv2.imread(depth_path, cv2.IMREAD_ANYCOLOR |
                           cv2.IMREAD_ANYDEPTH) / 100.0  # cm to m
        depth = Image.fromarray(depth)

        if self.do_kb_crop:
            if idx == 0:
                print("Using KB input crop")
            # Standard KITTI benchmark crop: bottom-centered 352x1216 window.
            height = image.height
            width = image.width
            top_margin = int(height - 352)
            left_margin = int((width - 1216) / 2)
            depth = depth.crop(
                (left_margin, top_margin, left_margin + 1216, top_margin + 352))
            image = image.crop(
                (left_margin, top_margin, left_margin + 1216, top_margin + 352))

        image = np.asarray(image, dtype=np.float32) / 255.0
        depth = np.asarray(depth, dtype=np.float32) / 1.
        depth[depth > 80] = -1  # cap at KITTI's 80 m evaluation range

        depth = depth[..., None]  # HW -> HW1 so ToTensor yields a 1-channel map
        sample = dict(image=image, depth=depth)

        sample = self.transform(sample)

        if idx == 0:
            # One-time shape printout for quick sanity checking.
            print(sample["image"].shape)

        return sample

    def __len__(self):
        # Number of frames in the selected split.
        return len(self.image_files)
170
+
171
+
172
def get_vkitti2_loader(data_dir_root, batch_size=1, **kwargs):
    """Build a DataLoader over the VKITTI2 dataset rooted at *data_dir_root*.

    Extra keyword arguments are forwarded verbatim to ``DataLoader``.
    """
    return DataLoader(VKITTI2(data_dir_root), batch_size, **kwargs)
175
+
176
+
177
if __name__ == "__main__":
    # Smoke test: iterate a few samples and print their shapes and depth range.
    # NOTE(review): the dataset path is machine-specific — adjust before running.
    loader = get_vkitti2_loader(
        data_dir_root="/home/bhatsf/shortcuts/datasets/vkitti2")
    print("Total files", len(loader.dataset))
    for i, sample in enumerate(loader):
        print(sample["image"].shape)
        print(sample["depth"].shape)
        print(sample["dataset"])
        print(sample['depth'].min(), sample['depth'].max())
        if i > 5:
            break
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (179 Bytes). View file
 
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/__pycache__/depth_model.cpython-39.pyc ADDED
Binary file (6.33 kB). View file
 
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/__pycache__/model_io.cpython-39.pyc ADDED
Binary file (2.29 kB). View file
 
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (191 Bytes). View file
 
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/__pycache__/midas.cpython-39.pyc ADDED
Binary file (11.8 kB). View file
 
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/levit.py ADDED
@@ -0,0 +1,106 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import timm
2
+ import torch
3
+ import torch.nn as nn
4
+ import numpy as np
5
+
6
+ from .utils import activations, get_activation, Transpose
7
+
8
+
9
def forward_levit(pretrained, x):
    """Run the LeViT trunk on *x* and return its three hooked feature maps,
    each reshaped by the matching ``act_postprocess*`` module."""
    # Populates pretrained.activations via the registered forward hooks.
    pretrained.model.forward_features(x)

    processed = []
    for tap in ("1", "2", "3"):
        raw = pretrained.activations[tap]
        post = getattr(pretrained, "act_postprocess" + tap)
        processed.append(post(raw))

    return tuple(processed)
21
+
22
+
23
def _make_levit_backbone(
    model,
    hooks=(3, 11, 21),
    patch_grid=(14, 14)
):
    """Wrap a timm LeViT *model* for MiDaS feature extraction.

    Forward hooks on the blocks named in *hooks* capture activations under
    keys "1".."3"; three post-processing heads unflatten the token sequences
    back to 2-D grids (full, 1/2 and 1/4 of *patch_grid*).

    Note: the defaults were changed from lists to tuples to avoid the
    shared-mutable-default pitfall; list arguments still work.
    """
    pretrained = nn.Module()

    pretrained.model = model
    pretrained.model.blocks[hooks[0]].register_forward_hook(get_activation("1"))
    pretrained.model.blocks[hooks[1]].register_forward_hook(get_activation("2"))
    pretrained.model.blocks[hooks[2]].register_forward_hook(get_activation("3"))

    pretrained.activations = activations

    patch_grid_size = np.array(patch_grid, dtype=int)

    # Token sequence (B, N, C) -> feature map (B, C, H, W) at three scales.
    pretrained.act_postprocess1 = nn.Sequential(
        Transpose(1, 2),
        nn.Unflatten(2, torch.Size(patch_grid_size.tolist()))
    )
    pretrained.act_postprocess2 = nn.Sequential(
        Transpose(1, 2),
        nn.Unflatten(2, torch.Size((np.ceil(patch_grid_size / 2).astype(int)).tolist()))
    )
    pretrained.act_postprocess3 = nn.Sequential(
        Transpose(1, 2),
        nn.Unflatten(2, torch.Size((np.ceil(patch_grid_size / 4).astype(int)).tolist()))
    )

    return pretrained
53
+
54
+
55
+ class ConvTransposeNorm(nn.Sequential):
56
+ """
57
+ Modification of
58
+ https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/levit.py: ConvNorm
59
+ such that ConvTranspose2d is used instead of Conv2d.
60
+ """
61
+
62
+ def __init__(
63
+ self, in_chs, out_chs, kernel_size=1, stride=1, pad=0, dilation=1,
64
+ groups=1, bn_weight_init=1):
65
+ super().__init__()
66
+ self.add_module('c',
67
+ nn.ConvTranspose2d(in_chs, out_chs, kernel_size, stride, pad, dilation, groups, bias=False))
68
+ self.add_module('bn', nn.BatchNorm2d(out_chs))
69
+
70
+ nn.init.constant_(self.bn.weight, bn_weight_init)
71
+
72
+ @torch.no_grad()
73
+ def fuse(self):
74
+ c, bn = self._modules.values()
75
+ w = bn.weight / (bn.running_var + bn.eps) ** 0.5
76
+ w = c.weight * w[:, None, None, None]
77
+ b = bn.bias - bn.running_mean * bn.weight / (bn.running_var + bn.eps) ** 0.5
78
+ m = nn.ConvTranspose2d(
79
+ w.size(1), w.size(0), w.shape[2:], stride=self.c.stride,
80
+ padding=self.c.padding, dilation=self.c.dilation, groups=self.c.groups)
81
+ m.weight.data.copy_(w)
82
+ m.bias.data.copy_(b)
83
+ return m
84
+
85
+
86
def stem_b4_transpose(in_chs, out_chs, activation):
    """
    Modification of
    https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/levit.py: stem_b16
    such that ConvTranspose2d is used instead of Conv2d and stem is also reduced to the half.
    """
    stages = [
        ConvTransposeNorm(in_chs, out_chs, 3, 2, 1),
        activation(),
        ConvTransposeNorm(out_chs, out_chs // 2, 3, 2, 1),
        activation(),
    ]
    return nn.Sequential(*stages)
97
+
98
+
99
def _make_pretrained_levit_384(pretrained, hooks=None):
    """Create a LeViT-384 backbone via timm and wrap it for MiDaS.

    *pretrained* selects ImageNet weights; *hooks* optionally overrides the
    default block indices used for feature taps.
    """
    model = timm.create_model("levit_384", pretrained=pretrained)

    # PEP 8: compare to the None sentinel with `is`, not `==`.
    hooks = [3, 11, 21] if hooks is None else hooks
    return _make_levit_backbone(
        model,
        hooks=hooks
    )
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/next_vit.py ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import timm
2
+
3
+ import torch.nn as nn
4
+
5
+ from pathlib import Path
6
+ from .utils import activations, forward_default, get_activation
7
+
8
+ from ..external.next_vit.classification.nextvit import *
9
+
10
+
11
def forward_next_vit(pretrained, x):
    # Delegate to the shared forward helper using the model's plain "forward"
    # method; the hooked activations supply the multi-scale features.
    return forward_default(pretrained, x, "forward")
13
+
14
+
15
def _make_next_vit_backbone(
    model,
    hooks=(2, 6, 36, 39),
):
    """Wrap a Next-ViT *model* for MiDaS by hooking four feature stages.

    Forward hooks on the *hooks* indices of ``model.features`` record
    activations under keys "1".."4".

    Note: the default was changed from a list to a tuple to avoid the
    shared-mutable-default pitfall; list arguments still work.
    """
    pretrained = nn.Module()

    pretrained.model = model
    pretrained.model.features[hooks[0]].register_forward_hook(get_activation("1"))
    pretrained.model.features[hooks[1]].register_forward_hook(get_activation("2"))
    pretrained.model.features[hooks[2]].register_forward_hook(get_activation("3"))
    pretrained.model.features[hooks[3]].register_forward_hook(get_activation("4"))

    pretrained.activations = activations

    return pretrained
30
+
31
+
32
def _make_pretrained_next_vit_large_6m(hooks=None):
    """Create a Next-ViT-Large backbone via timm and wrap it for MiDaS."""
    model = timm.create_model("nextvit_large")

    # PEP 8: compare to the None sentinel with `is`, not `==`.
    hooks = [2, 6, 36, 39] if hooks is None else hooks
    return _make_next_vit_backbone(
        model,
        hooks=hooks,
    )
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/swin.py ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import timm
2
+
3
+ from .swin_common import _make_swin_backbone
4
+
5
+
6
def _make_pretrained_swinl12_384(pretrained, hooks=None):
    """Create a Swin-L (patch4, window12, 384) backbone via timm and wrap it
    for MiDaS."""
    model = timm.create_model("swin_large_patch4_window12_384", pretrained=pretrained)

    # PEP 8: compare to the None sentinel with `is`, not `==`.
    hooks = [1, 1, 17, 1] if hooks is None else hooks
    return _make_swin_backbone(
        model,
        hooks=hooks
    )
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/swin_common.py ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+
3
+ import torch.nn as nn
4
+ import numpy as np
5
+
6
+ from .utils import activations, forward_default, get_activation, Transpose
7
+
8
+
9
def forward_swin(pretrained, x):
    # Run the hooked Swin model through the shared default forward helper,
    # which collects and post-processes the four activation taps.
    return forward_default(pretrained, x)
11
+
12
+
13
def _make_swin_backbone(
    model,
    hooks=(1, 1, 17, 1),
    patch_grid=(96, 96)
):
    """Hook four Swin stages of *model* and build per-scale unflatten heads.

    Activations are captured under keys "1".."4"; each post-processing head
    turns a (B, N, C) token sequence back into a (B, C, H, W) grid at the
    stage's resolution (the patch grid is halved at each stage).

    Note: the defaults were changed from lists to tuples to avoid the
    shared-mutable-default pitfall; list arguments still work.
    """
    pretrained = nn.Module()

    pretrained.model = model
    pretrained.model.layers[0].blocks[hooks[0]].register_forward_hook(get_activation("1"))
    pretrained.model.layers[1].blocks[hooks[1]].register_forward_hook(get_activation("2"))
    pretrained.model.layers[2].blocks[hooks[2]].register_forward_hook(get_activation("3"))
    pretrained.model.layers[3].blocks[hooks[3]].register_forward_hook(get_activation("4"))

    pretrained.activations = activations

    # Prefer the model's own patch grid when it exposes one.
    if hasattr(model, "patch_grid"):
        used_patch_grid = model.patch_grid
    else:
        used_patch_grid = patch_grid

    patch_grid_size = np.array(used_patch_grid, dtype=int)

    pretrained.act_postprocess1 = nn.Sequential(
        Transpose(1, 2),
        nn.Unflatten(2, torch.Size(patch_grid_size.tolist()))
    )
    pretrained.act_postprocess2 = nn.Sequential(
        Transpose(1, 2),
        nn.Unflatten(2, torch.Size((patch_grid_size // 2).tolist()))
    )
    pretrained.act_postprocess3 = nn.Sequential(
        Transpose(1, 2),
        nn.Unflatten(2, torch.Size((patch_grid_size // 4).tolist()))
    )
    pretrained.act_postprocess4 = nn.Sequential(
        Transpose(1, 2),
        nn.Unflatten(2, torch.Size((patch_grid_size // 8).tolist()))
    )

    return pretrained
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/midas/backbones/vit.py ADDED
@@ -0,0 +1,221 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import torch
2
+ import torch.nn as nn
3
+ import timm
4
+ import types
5
+ import math
6
+ import torch.nn.functional as F
7
+
8
+ from .utils import (activations, forward_adapted_unflatten, get_activation, get_readout_oper,
9
+ make_backbone_default, Transpose)
10
+
11
+
12
def forward_vit(pretrained, x):
    # Use the resolution-flexible forward ("forward_flex", injected below) and
    # unflatten the hooked token activations into spatial feature maps.
    return forward_adapted_unflatten(pretrained, x, "forward_flex")
14
+
15
+
16
+ def _resize_pos_embed(self, posemb, gs_h, gs_w):
17
+ posemb_tok, posemb_grid = (
18
+ posemb[:, : self.start_index],
19
+ posemb[0, self.start_index:],
20
+ )
21
+
22
+ gs_old = int(math.sqrt(len(posemb_grid)))
23
+
24
+ posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2)
25
+ posemb_grid = F.interpolate(posemb_grid, size=(gs_h, gs_w), mode="bilinear")
26
+ posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_h * gs_w, -1)
27
+
28
+ posemb = torch.cat([posemb_tok, posemb_grid], dim=1)
29
+
30
+ return posemb
31
+
32
+
33
def forward_flex(self, x):
    """Forward pass that tolerates arbitrary input resolutions.

    The learned position embedding is resized on the fly to match the token
    grid implied by the input's spatial size and the model's patch size.
    """
    b, c, h, w = x.shape

    # Resize the position embedding to the current token grid.
    pos_embed = self._resize_pos_embed(
        self.pos_embed, h // self.patch_size[1], w // self.patch_size[0]
    )

    B = x.shape[0]

    # Hybrid models embed patches with a CNN backbone first.
    if hasattr(self.patch_embed, "backbone"):
        x = self.patch_embed.backbone(x)
        if isinstance(x, (list, tuple)):
            x = x[-1]  # last feature if backbone outputs list/tuple of features

    # Project to tokens: (B, C, H, W) -> (B, N, C).
    x = self.patch_embed.proj(x).flatten(2).transpose(1, 2)

    if getattr(self, "dist_token", None) is not None:
        # DeiT-style models carry an extra distillation token.
        cls_tokens = self.cls_token.expand(
            B, -1, -1
        )  # stole cls_tokens impl from Phil Wang, thanks
        dist_token = self.dist_token.expand(B, -1, -1)
        x = torch.cat((cls_tokens, dist_token, x), dim=1)
    else:
        if self.no_embed_class:
            # Some timm models add the position embedding before prepending
            # the class token.
            x = x + pos_embed
        cls_tokens = self.cls_token.expand(
            B, -1, -1
        )  # stole cls_tokens impl from Phil Wang, thanks
        x = torch.cat((cls_tokens, x), dim=1)

    if not self.no_embed_class:
        x = x + pos_embed
    x = self.pos_drop(x)

    for blk in self.blocks:
        x = blk(x)

    x = self.norm(x)

    return x
73
+
74
+
75
def _make_vit_b16_backbone(
    model,
    features=[96, 192, 384, 768],
    size=[384, 384],
    hooks=[2, 5, 8, 11],
    vit_features=768,
    use_readout="ignore",
    start_index=1,
    start_index_readout=1,
):
    """Wrap a plain ViT/16 *model* for MiDaS feature extraction.

    Builds the default hooked backbone, then injects a resolution-flexible
    forward and a position-embedding resize into the model instance.
    """
    # NOTE(review): the list defaults are shared mutable objects; they are not
    # mutated here, but tuple defaults would be safer — confirm before changing,
    # since they are forwarded to make_backbone_default.
    pretrained = make_backbone_default(model, features, size, hooks, vit_features, use_readout, start_index,
                                       start_index_readout)

    # We inject this function into the VisionTransformer instances so that
    # we can use it with interpolated position embeddings without modifying the library source.
    pretrained.model.forward_flex = types.MethodType(forward_flex, pretrained.model)
    pretrained.model._resize_pos_embed = types.MethodType(
        _resize_pos_embed, pretrained.model
    )

    return pretrained
96
+
97
+
98
def _make_pretrained_vitl16_384(pretrained, use_readout="ignore", hooks=None):
    """Create a ViT-L/16 @384 backbone via timm and wrap it for MiDaS."""
    model = timm.create_model("vit_large_patch16_384", pretrained=pretrained)

    # PEP 8: compare to the None sentinel with `is`, not `==`.
    hooks = [5, 11, 17, 23] if hooks is None else hooks
    return _make_vit_b16_backbone(
        model,
        features=[256, 512, 1024, 1024],
        hooks=hooks,
        vit_features=1024,
        use_readout=use_readout,
    )
109
+
110
+
111
def _make_pretrained_vitb16_384(pretrained, use_readout="ignore", hooks=None):
    """Create a ViT-B/16 @384 backbone via timm and wrap it for MiDaS."""
    model = timm.create_model("vit_base_patch16_384", pretrained=pretrained)

    # PEP 8: compare to the None sentinel with `is`, not `==`.
    hooks = [2, 5, 8, 11] if hooks is None else hooks
    return _make_vit_b16_backbone(
        model, features=[96, 192, 384, 768], hooks=hooks, use_readout=use_readout
    )
118
+
119
+
120
def _make_vit_b_rn50_backbone(
    model,
    features=(256, 512, 768, 768),
    size=(384, 384),
    hooks=(0, 1, 8, 11),
    vit_features=768,
    patch_size=(16, 16),
    number_stages=2,
    use_vit_only=False,
    use_readout="ignore",
    start_index=1,
):
    """Wrap a hybrid ResNet50+ViT *model* for MiDaS feature extraction.

    The first *number_stages* taps come from the ResNet stem (unless
    *use_vit_only*), the rest from transformer blocks selected by *hooks*.
    Per-stage post-processing modules resample each tap to the scale the
    decoder expects.

    Changes from the original: mutable default lists were replaced by tuples,
    and the ``exec(...)`` attribute assignments by ``setattr`` — behavior is
    identical but the code is safer and introspectable.
    """
    pretrained = nn.Module()

    pretrained.model = model

    # Hook ResNet stages for the early taps, transformer blocks for the rest.
    used_number_stages = 0 if use_vit_only else number_stages
    for s in range(used_number_stages):
        pretrained.model.patch_embed.backbone.stages[s].register_forward_hook(
            get_activation(str(s + 1))
        )
    for s in range(used_number_stages, 4):
        pretrained.model.blocks[hooks[s]].register_forward_hook(get_activation(str(s + 1)))

    pretrained.activations = activations

    readout_oper = get_readout_oper(vit_features, features, use_readout, start_index)

    # ResNet taps need no post-processing.
    for s in range(used_number_stages):
        value = nn.Sequential(nn.Identity(), nn.Identity(), nn.Identity())
        setattr(pretrained, f"act_postprocess{s + 1}", value)
    for s in range(used_number_stages, 4):
        # Upsample early transformer taps, downsample the last one, or keep
        # scale for the stage at number_stages.
        if s < number_stages:
            final_layer = nn.ConvTranspose2d(
                in_channels=features[s],
                out_channels=features[s],
                kernel_size=4 // (2 ** s),
                stride=4 // (2 ** s),
                padding=0,
                bias=True,
                dilation=1,
                groups=1,
            )
        elif s > number_stages:
            final_layer = nn.Conv2d(
                in_channels=features[3],
                out_channels=features[3],
                kernel_size=3,
                stride=2,
                padding=1,
            )
        else:
            final_layer = None

        layers = [
            readout_oper[s],
            Transpose(1, 2),
            nn.Unflatten(2, torch.Size([size[0] // 16, size[1] // 16])),
            nn.Conv2d(
                in_channels=vit_features,
                out_channels=features[s],
                kernel_size=1,
                stride=1,
                padding=0,
            ),
        ]
        if final_layer is not None:
            layers.append(final_layer)

        setattr(pretrained, f"act_postprocess{s + 1}", nn.Sequential(*layers))

    pretrained.model.start_index = start_index
    pretrained.model.patch_size = patch_size

    # We inject this function into the VisionTransformer instances so that
    # we can use it with interpolated position embeddings without modifying the library source.
    pretrained.model.forward_flex = types.MethodType(forward_flex, pretrained.model)

    # We inject this function into the VisionTransformer instances so that
    # we can use it with interpolated position embeddings without modifying the library source.
    pretrained.model._resize_pos_embed = types.MethodType(
        _resize_pos_embed, pretrained.model
    )

    return pretrained
206
+
207
+
208
def _make_pretrained_vitb_rn50_384(
    pretrained, use_readout="ignore", hooks=None, use_vit_only=False
):
    """Create the hybrid ResNet50+ViT-B @384 backbone via timm and wrap it
    for MiDaS."""
    model = timm.create_model("vit_base_resnet50_384", pretrained=pretrained)

    # PEP 8: compare to the None sentinel with `is`, not `==`.
    hooks = [0, 1, 8, 11] if hooks is None else hooks
    return _make_vit_b_rn50_backbone(
        model,
        features=[256, 512, 768, 768],
        size=[384, 384],
        hooks=hooks,
        use_vit_only=use_vit_only,
        use_readout=use_readout,
    )
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/android/app/src/androidTest/assets/fox-mobilenet_v1_1.0_224_support.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ red_fox 0.79403335
2
+ kit_fox 0.16753247
3
+ grey_fox 0.03619214
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/android/app/src/androidTest/assets/fox-mobilenet_v1_1.0_224_task_api.txt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ red_fox 0.85
2
+ kit_fox 0.13
3
+ grey_fox 0.02
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/android/app/src/androidTest/java/AndroidManifest.xml ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ <?xml version="1.0" encoding="utf-8"?>
2
+ <manifest xmlns:android="http://schemas.android.com/apk/res/android"
3
+ package="org.tensorflow.lite.examples.classification">
4
+ <uses-sdk />
5
+ </manifest>
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/android/app/src/androidTest/java/org/tensorflow/lite/examples/classification/ClassifierTest.java ADDED
@@ -0,0 +1,121 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
/*
 * Copyright 2019 The TensorFlow Authors. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *       http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.tensorflow.lite.examples.classification;

import static com.google.common.truth.Truth.assertThat;

import android.content.res.AssetManager;
import android.graphics.Bitmap;
import android.graphics.BitmapFactory;
import android.util.Log;
import androidx.test.ext.junit.runners.AndroidJUnit4;
import androidx.test.platform.app.InstrumentationRegistry;
import androidx.test.rule.ActivityTestRule;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Scanner;
import org.junit.Assert;
import org.junit.Rule;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.tensorflow.lite.examples.classification.tflite.Classifier;
import org.tensorflow.lite.examples.classification.tflite.Classifier.Device;
import org.tensorflow.lite.examples.classification.tflite.Classifier.Model;
import org.tensorflow.lite.examples.classification.tflite.Classifier.Recognition;

/** Golden test for Image Classification Reference app. */
@RunWith(AndroidJUnit4.class)
public class ClassifierTest {

  @Rule
  public ActivityTestRule<ClassifierActivity> rule =
      new ActivityTestRule<>(ClassifierActivity.class);

  // Input images and their expected (golden) top-k outputs, one file per input.
  private static final String[] INPUTS = {"fox.jpg"};
  private static final String[] GOLDEN_OUTPUTS_SUPPORT = {"fox-mobilenet_v1_1.0_224_support.txt"};
  private static final String[] GOLDEN_OUTPUTS_TASK = {"fox-mobilenet_v1_1.0_224_task_api.txt"};

  /**
   * Runs the float MobileNet classifier on each golden input and checks that the
   * labels match exactly and the confidences match within 0.01.
   */
  @Test
  public void classificationResultsShouldNotChange() throws IOException {
    ClassifierActivity activity = rule.getActivity();
    Classifier classifier = Classifier.create(activity, Model.FLOAT_MOBILENET, Device.CPU, 1);
    for (int i = 0; i < INPUTS.length; i++) {
      String imageFileName = INPUTS[i];
      String goldenOutputFileName;
      // TODO(b/169379396): investigate the impact of the resize algorithm on accuracy.
      // This is a temporary workaround to set different golden rest results as the preprocessing
      // of lib_support and lib_task_api are different. Will merge them once the above TODO is
      // resolved.
      if (Classifier.TAG.equals("ClassifierWithSupport")) {
        goldenOutputFileName = GOLDEN_OUTPUTS_SUPPORT[i];
      } else {
        goldenOutputFileName = GOLDEN_OUTPUTS_TASK[i];
      }
      Bitmap input = loadImage(imageFileName);
      List<Recognition> goldenOutput = loadRecognitions(goldenOutputFileName);

      List<Recognition> result = classifier.recognizeImage(input, 0);
      Iterator<Recognition> goldenOutputIterator = goldenOutput.iterator();

      for (Recognition actual : result) {
        Assert.assertTrue(goldenOutputIterator.hasNext());
        Recognition expected = goldenOutputIterator.next();
        assertThat(actual.getTitle()).isEqualTo(expected.getTitle());
        assertThat(actual.getConfidence()).isWithin(0.01f).of(expected.getConfidence());
      }
    }
  }

  /** Decodes a test image from the instrumentation context's assets. */
  private static Bitmap loadImage(String fileName) {
    AssetManager assetManager =
        InstrumentationRegistry.getInstrumentation().getContext().getAssets();
    InputStream inputStream = null;
    try {
      inputStream = assetManager.open(fileName);
    } catch (IOException e) {
      Log.e("Test", "Cannot load image from assets");
    }
    return BitmapFactory.decodeStream(inputStream);
  }

  /**
   * Parses a golden-output asset of "label probability" lines into Recognitions.
   * Underscores in labels are restored to spaces to match classifier output.
   */
  private static List<Recognition> loadRecognitions(String fileName) {
    AssetManager assetManager =
        InstrumentationRegistry.getInstrumentation().getContext().getAssets();
    InputStream inputStream = null;
    try {
      inputStream = assetManager.open(fileName);
    } catch (IOException e) {
      Log.e("Test", "Cannot load probability results from assets");
    }
    Scanner scanner = new Scanner(inputStream);
    List<Recognition> result = new ArrayList<>();
    while (scanner.hasNext()) {
      String category = scanner.next();
      category = category.replace('_', ' ');
      if (!scanner.hasNextFloat()) {
        break;
      }
      float probability = scanner.nextFloat();
      Recognition recognition = new Recognition(null, category, probability, null);
      result.add(recognition);
    }
    return result;
  }
}
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/android/app/src/main/res/drawable-v24/ic_launcher_foreground.xml ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <vector xmlns:android="http://schemas.android.com/apk/res/android"
2
+ xmlns:aapt="http://schemas.android.com/aapt"
3
+ android:width="108dp"
4
+ android:height="108dp"
5
+ android:viewportHeight="108"
6
+ android:viewportWidth="108">
7
+ <path
8
+ android:fillType="evenOdd"
9
+ android:pathData="M32,64C32,64 38.39,52.99 44.13,50.95C51.37,48.37 70.14,49.57 70.14,49.57L108.26,87.69L108,109.01L75.97,107.97L32,64Z"
10
+ android:strokeColor="#00000000"
11
+ android:strokeWidth="1">
12
+ <aapt:attr name="android:fillColor">
13
+ <gradient
14
+ android:endX="78.5885"
15
+ android:endY="90.9159"
16
+ android:startX="48.7653"
17
+ android:startY="61.0927"
18
+ android:type="linear">
19
+ <item
20
+ android:color="#44000000"
21
+ android:offset="0.0"/>
22
+ <item
23
+ android:color="#00000000"
24
+ android:offset="1.0"/>
25
+ </gradient>
26
+ </aapt:attr>
27
+ </path>
28
+ <path
29
+ android:fillColor="#FFFFFF"
30
+ android:fillType="nonZero"
31
+ android:pathData="M66.94,46.02L66.94,46.02C72.44,50.07 76,56.61 76,64L32,64C32,56.61 35.56,50.11 40.98,46.06L36.18,41.19C35.45,40.45 35.45,39.3 36.18,38.56C36.91,37.81 38.05,37.81 38.78,38.56L44.25,44.05C47.18,42.57 50.48,41.71 54,41.71C57.48,41.71 60.78,42.57 63.68,44.05L69.11,38.56C69.84,37.81 70.98,37.81 71.71,38.56C72.44,39.3 72.44,40.45 71.71,41.19L66.94,46.02ZM62.94,56.92C64.08,56.92 65,56.01 65,54.88C65,53.76 64.08,52.85 62.94,52.85C61.8,52.85 60.88,53.76 60.88,54.88C60.88,56.01 61.8,56.92 62.94,56.92ZM45.06,56.92C46.2,56.92 47.13,56.01 47.13,54.88C47.13,53.76 46.2,52.85 45.06,52.85C43.92,52.85 43,53.76 43,54.88C43,56.01 43.92,56.92 45.06,56.92Z"
32
+ android:strokeColor="#00000000"
33
+ android:strokeWidth="1"/>
34
+ </vector>
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/android/lib_task_api/src/main/java/org/tensorflow/lite/examples/classification/tflite/Classifier.java ADDED
@@ -0,0 +1,278 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ package org.tensorflow.lite.examples.classification.tflite;
17
+
18
+ import static java.lang.Math.min;
19
+
20
+ import android.app.Activity;
21
+ import android.graphics.Bitmap;
22
+ import android.graphics.Rect;
23
+ import android.graphics.RectF;
24
+ import android.os.SystemClock;
25
+ import android.os.Trace;
26
+ import android.util.Log;
27
+ import java.io.IOException;
28
+ import java.nio.MappedByteBuffer;
29
+ import java.util.ArrayList;
30
+ import java.util.List;
31
+ import org.tensorflow.lite.examples.classification.tflite.Classifier.Device;
32
+ import org.tensorflow.lite.support.common.FileUtil;
33
+ import org.tensorflow.lite.support.image.TensorImage;
34
+ import org.tensorflow.lite.support.label.Category;
35
+ import org.tensorflow.lite.support.metadata.MetadataExtractor;
36
+ import org.tensorflow.lite.task.core.vision.ImageProcessingOptions;
37
+ import org.tensorflow.lite.task.core.vision.ImageProcessingOptions.Orientation;
38
+ import org.tensorflow.lite.task.vision.classifier.Classifications;
39
+ import org.tensorflow.lite.task.vision.classifier.ImageClassifier;
40
+ import org.tensorflow.lite.task.vision.classifier.ImageClassifier.ImageClassifierOptions;
41
+
42
+ /** A classifier specialized to label images using TensorFlow Lite. */
43
+ public abstract class Classifier {
44
+ public static final String TAG = "ClassifierWithTaskApi";
45
+
46
+ /** The model type used for classification. */
47
+ public enum Model {
48
+ FLOAT_MOBILENET,
49
+ QUANTIZED_MOBILENET,
50
+ FLOAT_EFFICIENTNET,
51
+ QUANTIZED_EFFICIENTNET
52
+ }
53
+
54
+ /** The runtime device type used for executing classification. */
55
+ public enum Device {
56
+ CPU,
57
+ NNAPI,
58
+ GPU
59
+ }
60
+
61
+ /** Number of results to show in the UI. */
62
+ private static final int MAX_RESULTS = 3;
63
+
64
+ /** Image size along the x axis. */
65
+ private final int imageSizeX;
66
+
67
+ /** Image size along the y axis. */
68
+ private final int imageSizeY;
69
+ /** An instance of the driver class to run model inference with Tensorflow Lite. */
70
+ protected final ImageClassifier imageClassifier;
71
+
72
+ /**
73
+ * Creates a classifier with the provided configuration.
74
+ *
75
+ * @param activity The current Activity.
76
+ * @param model The model to use for classification.
77
+ * @param device The device to use for classification.
78
+ * @param numThreads The number of threads to use for classification.
79
+ * @return A classifier with the desired configuration.
80
+ */
81
+ public static Classifier create(Activity activity, Model model, Device device, int numThreads)
82
+ throws IOException {
83
+ if (model == Model.QUANTIZED_MOBILENET) {
84
+ return new ClassifierQuantizedMobileNet(activity, device, numThreads);
85
+ } else if (model == Model.FLOAT_MOBILENET) {
86
+ return new ClassifierFloatMobileNet(activity, device, numThreads);
87
+ } else if (model == Model.FLOAT_EFFICIENTNET) {
88
+ return new ClassifierFloatEfficientNet(activity, device, numThreads);
89
+ } else if (model == Model.QUANTIZED_EFFICIENTNET) {
90
+ return new ClassifierQuantizedEfficientNet(activity, device, numThreads);
91
+ } else {
92
+ throw new UnsupportedOperationException();
93
+ }
94
+ }
95
+
96
+ /** An immutable result returned by a Classifier describing what was recognized. */
97
+ public static class Recognition {
98
+ /**
99
+ * A unique identifier for what has been recognized. Specific to the class, not the instance of
100
+ * the object.
101
+ */
102
+ private final String id;
103
+
104
+ /** Display name for the recognition. */
105
+ private final String title;
106
+
107
+ /**
108
+ * A sortable score for how good the recognition is relative to others. Higher should be better.
109
+ */
110
+ private final Float confidence;
111
+
112
+ /** Optional location within the source image for the location of the recognized object. */
113
+ private RectF location;
114
+
115
+ public Recognition(
116
+ final String id, final String title, final Float confidence, final RectF location) {
117
+ this.id = id;
118
+ this.title = title;
119
+ this.confidence = confidence;
120
+ this.location = location;
121
+ }
122
+
123
+ public String getId() {
124
+ return id;
125
+ }
126
+
127
+ public String getTitle() {
128
+ return title;
129
+ }
130
+
131
+ public Float getConfidence() {
132
+ return confidence;
133
+ }
134
+
135
+ public RectF getLocation() {
136
+ return new RectF(location);
137
+ }
138
+
139
+ public void setLocation(RectF location) {
140
+ this.location = location;
141
+ }
142
+
143
+ @Override
144
+ public String toString() {
145
+ String resultString = "";
146
+ if (id != null) {
147
+ resultString += "[" + id + "] ";
148
+ }
149
+
150
+ if (title != null) {
151
+ resultString += title + " ";
152
+ }
153
+
154
+ if (confidence != null) {
155
+ resultString += String.format("(%.1f%%) ", confidence * 100.0f);
156
+ }
157
+
158
+ if (location != null) {
159
+ resultString += location + " ";
160
+ }
161
+
162
+ return resultString.trim();
163
+ }
164
+ }
165
+
166
+ /** Initializes a {@code Classifier}. */
167
+ protected Classifier(Activity activity, Device device, int numThreads) throws IOException {
168
+ if (device != Device.CPU || numThreads != 1) {
169
+ throw new IllegalArgumentException(
170
+ "Manipulating the hardware accelerators and numbers of threads is not allowed in the Task"
171
+ + " library currently. Only CPU + single thread is allowed.");
172
+ }
173
+
174
+ // Create the ImageClassifier instance.
175
+ ImageClassifierOptions options =
176
+ ImageClassifierOptions.builder().setMaxResults(MAX_RESULTS).build();
177
+ imageClassifier = ImageClassifier.createFromFileAndOptions(activity, getModelPath(), options);
178
+ Log.d(TAG, "Created a Tensorflow Lite Image Classifier.");
179
+
180
+ // Get the input image size information of the underlying tflite model.
181
+ MappedByteBuffer tfliteModel = FileUtil.loadMappedFile(activity, getModelPath());
182
+ MetadataExtractor metadataExtractor = new MetadataExtractor(tfliteModel);
183
+ // Image shape is in the format of {1, height, width, 3}.
184
+ int[] imageShape = metadataExtractor.getInputTensorShape(/*inputIndex=*/ 0);
185
+ imageSizeY = imageShape[1];
186
+ imageSizeX = imageShape[2];
187
+ }
188
+
189
+ /** Runs inference and returns the classification results. */
190
+ public List<Recognition> recognizeImage(final Bitmap bitmap, int sensorOrientation) {
191
+ // Logs this method so that it can be analyzed with systrace.
192
+ Trace.beginSection("recognizeImage");
193
+
194
+ TensorImage inputImage = TensorImage.fromBitmap(bitmap);
195
+ int width = bitmap.getWidth();
196
+ int height = bitmap.getHeight();
197
+ int cropSize = min(width, height);
198
+ // TODO(b/169379396): investigate the impact of the resize algorithm on accuracy.
199
+ // Task Library resize the images using bilinear interpolation, which is slightly different from
200
+ // the nearest neighbor sampling algorithm used in lib_support. See
201
+ // https://github.com/tensorflow/examples/blob/0ef3d93e2af95d325c70ef3bcbbd6844d0631e07/lite/examples/image_classification/android/lib_support/src/main/java/org/tensorflow/lite/examples/classification/tflite/Classifier.java#L310.
202
+ ImageProcessingOptions imageOptions =
203
+ ImageProcessingOptions.builder()
204
+ .setOrientation(getOrientation(sensorOrientation))
205
+ // Set the ROI to the center of the image.
206
+ .setRoi(
207
+ new Rect(
208
+ /*left=*/ (width - cropSize) / 2,
209
+ /*top=*/ (height - cropSize) / 2,
210
+ /*right=*/ (width + cropSize) / 2,
211
+ /*bottom=*/ (height + cropSize) / 2))
212
+ .build();
213
+
214
+ // Runs the inference call.
215
+ Trace.beginSection("runInference");
216
+ long startTimeForReference = SystemClock.uptimeMillis();
217
+ List<Classifications> results = imageClassifier.classify(inputImage, imageOptions);
218
+ long endTimeForReference = SystemClock.uptimeMillis();
219
+ Trace.endSection();
220
+ Log.v(TAG, "Timecost to run model inference: " + (endTimeForReference - startTimeForReference));
221
+
222
+ Trace.endSection();
223
+
224
+ return getRecognitions(results);
225
+ }
226
+
227
+ /** Closes the interpreter and model to release resources. */
228
+ public void close() {
229
+ if (imageClassifier != null) {
230
+ imageClassifier.close();
231
+ }
232
+ }
233
+
234
+ /** Get the image size along the x axis. */
235
+ public int getImageSizeX() {
236
+ return imageSizeX;
237
+ }
238
+
239
+ /** Get the image size along the y axis. */
240
+ public int getImageSizeY() {
241
+ return imageSizeY;
242
+ }
243
+
244
+ /**
245
+ * Converts a list of {@link Classifications} objects into a list of {@link Recognition} objects
246
+ * to match the interface of other inference method, such as using the <a
247
+ * href="https://github.com/tensorflow/examples/tree/master/lite/examples/image_classification/android/lib_support">TFLite
248
+ * Support Library.</a>.
249
+ */
250
+ private static List<Recognition> getRecognitions(List<Classifications> classifications) {
251
+
252
+ final ArrayList<Recognition> recognitions = new ArrayList<>();
253
+ // All the demo models are single head models. Get the first Classifications in the results.
254
+ for (Category category : classifications.get(0).getCategories()) {
255
+ recognitions.add(
256
+ new Recognition(
257
+ "" + category.getLabel(), category.getLabel(), category.getScore(), null));
258
+ }
259
+ return recognitions;
260
+ }
261
+
262
+ /* Convert the camera orientation in degree into {@link ImageProcessingOptions#Orientation}.*/
263
+ private static Orientation getOrientation(int cameraOrientation) {
264
+ switch (cameraOrientation / 90) {
265
+ case 3:
266
+ return Orientation.BOTTOM_LEFT;
267
+ case 2:
268
+ return Orientation.BOTTOM_RIGHT;
269
+ case 1:
270
+ return Orientation.TOP_RIGHT;
271
+ default:
272
+ return Orientation.TOP_LEFT;
273
+ }
274
+ }
275
+
276
+ /** Gets the name of the model file stored in Assets. */
277
+ protected abstract String getModelPath();
278
+ }
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/android/lib_task_api/src/main/java/org/tensorflow/lite/examples/classification/tflite/ClassifierFloatEfficientNet.java ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ package org.tensorflow.lite.examples.classification.tflite;
17
+
18
+ import android.app.Activity;
19
+ import java.io.IOException;
20
+ import org.tensorflow.lite.examples.classification.tflite.Classifier.Device;
21
+
22
+ /** This TensorFlowLite classifier works with the float EfficientNet model. */
23
+ public class ClassifierFloatEfficientNet extends Classifier {
24
+
25
+ /**
26
+ * Initializes a {@code ClassifierFloatMobileNet}.
27
+ *
28
+ * @param device a {@link Device} object to configure the hardware accelerator
29
+ * @param numThreads the number of threads during the inference
30
+ * @throws IOException if the model is not loaded correctly
31
+ */
32
+ public ClassifierFloatEfficientNet(Activity activity, Device device, int numThreads)
33
+ throws IOException {
34
+ super(activity, device, numThreads);
35
+ }
36
+
37
+ @Override
38
+ protected String getModelPath() {
39
+ // you can download this file from
40
+ // see build.gradle for where to obtain this file. It should be auto
41
+ // downloaded into assets.
42
+ //return "efficientnet-lite0-fp32.tflite";
43
+ return "model.tflite";
44
+ }
45
+ }
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/android/lib_task_api/src/main/java/org/tensorflow/lite/examples/classification/tflite/ClassifierQuantizedEfficientNet.java ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ package org.tensorflow.lite.examples.classification.tflite;
17
+
18
+ import android.app.Activity;
19
+ import java.io.IOException;
20
+
21
+ /** This TensorFlow Lite classifier works with the quantized EfficientNet model. */
22
+ public class ClassifierQuantizedEfficientNet extends Classifier {
23
+
24
+ /**
25
+ * Initializes a {@code ClassifierQuantizedMobileNet}.
26
+ *
27
+ * @param device a {@link Device} object to configure the hardware accelerator
28
+ * @param numThreads the number of threads during the inference
29
+ * @throws IOException if the model is not loaded correctly
30
+ */
31
+ public ClassifierQuantizedEfficientNet(Activity activity, Device device, int numThreads)
32
+ throws IOException {
33
+ super(activity, device, numThreads);
34
+ }
35
+
36
+ @Override
37
+ protected String getModelPath() {
38
+ // you can download this file from
39
+ // see build.gradle for where to obtain this file. It should be auto
40
+ // downloaded into assets.
41
+ return "efficientnet-lite0-int8.tflite";
42
+ }
43
+ }
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/android/lib_task_api/src/main/java/org/tensorflow/lite/examples/classification/tflite/ClassifierQuantizedMobileNet.java ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ ==============================================================================*/
15
+
16
+ package org.tensorflow.lite.examples.classification.tflite;
17
+
18
+ import android.app.Activity;
19
+ import java.io.IOException;
20
+ import org.tensorflow.lite.examples.classification.tflite.Classifier.Device;
21
+
22
+ /** This TensorFlow Lite classifier works with the quantized MobileNet model. */
23
+ public class ClassifierQuantizedMobileNet extends Classifier {
24
+
25
+ /**
26
+ * Initializes a {@code ClassifierQuantizedMobileNet}.
27
+ *
28
+ * @param device a {@link Device} object to configure the hardware accelerator
29
+ * @param numThreads the number of threads during the inference
30
+ * @throws IOException if the model is not loaded correctly
31
+ */
32
+ public ClassifierQuantizedMobileNet(Activity activity, Device device, int numThreads)
33
+ throws IOException {
34
+ super(activity, device, numThreads);
35
+ }
36
+
37
+ @Override
38
+ protected String getModelPath() {
39
+ // you can download this file from
40
+ // see build.gradle for where to obtain this file. It should be auto
41
+ // downloaded into assets.
42
+ return "mobilenet_v1_1.0_224_quant.tflite";
43
+ }
44
+ }
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/android/models/build.gradle ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ apply plugin: 'com.android.library'
2
+ apply plugin: 'de.undercouch.download'
3
+
4
+ android {
5
+ compileSdkVersion 28
6
+ buildToolsVersion "28.0.0"
7
+
8
+ defaultConfig {
9
+ minSdkVersion 21
10
+ targetSdkVersion 28
11
+ versionCode 1
12
+ versionName "1.0"
13
+
14
+ testInstrumentationRunner "androidx.test.runner.AndroidJUnitRunner"
15
+
16
+ }
17
+
18
+ buildTypes {
19
+ release {
20
+ minifyEnabled false
21
+ proguardFiles getDefaultProguardFile('proguard-android-optimize.txt'), 'proguard-rules.pro'
22
+ }
23
+ }
24
+
25
+ aaptOptions {
26
+ noCompress "tflite"
27
+ }
28
+
29
+ lintOptions {
30
+ checkReleaseBuilds false
31
+ // Or, if you prefer, you can continue to check for errors in release builds,
32
+ // but continue the build even when errors are found:
33
+ abortOnError false
34
+ }
35
+ }
36
+
37
+ // Download default models; if you wish to use your own models then
38
+ // place them in the "assets" directory and comment out this line.
39
+ project.ext.ASSET_DIR = projectDir.toString() + '/src/main/assets'
40
+ apply from:'download.gradle'
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/android/models/download.gradle ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ def modelFloatDownloadUrl = "https://github.com/isl-org/MiDaS/releases/download/v2_1/model_opt.tflite"
2
+ def modelFloatFile = "model_opt.tflite"
3
+
4
+ task downloadModelFloat(type: Download) {
5
+ src "${modelFloatDownloadUrl}"
6
+ dest project.ext.ASSET_DIR + "/${modelFloatFile}"
7
+ overwrite false
8
+ }
9
+
10
+ preBuild.dependsOn downloadModelFloat
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/android/models/proguard-rules.pro ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Add project specific ProGuard rules here.
2
+ # You can control the set of applied configuration files using the
3
+ # proguardFiles setting in build.gradle.
4
+ #
5
+ # For more details, see
6
+ # http://developer.android.com/guide/developing/tools/proguard.html
7
+
8
+ # If your project uses WebView with JS, uncomment the following
9
+ # and specify the fully qualified class name to the JavaScript interface
10
+ # class:
11
+ #-keepclassmembers class fqcn.of.javascript.interface.for.webview {
12
+ # public *;
13
+ #}
14
+
15
+ # Uncomment this to preserve the line number information for
16
+ # debugging stack traces.
17
+ #-keepattributes SourceFile,LineNumberTable
18
+
19
+ # If you keep the line number information, uncomment this to
20
+ # hide the original source file name.
21
+ #-renamesourcefileattribute SourceFile
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/android/models/src/main/AndroidManifest.xml ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ <manifest xmlns:android="http://schemas.android.com/apk/res/android"
2
+ package="org.tensorflow.lite.examples.classification.models">
3
+ </manifest>
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/android/models/src/main/assets/run_tflite.py ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Flex ops are included in the nightly build of the TensorFlow Python package. You can use TFLite models containing Flex ops by the same Python API as normal TFLite models. The nightly TensorFlow build can be installed with this command:
2
+ # Flex ops will be added to the TensorFlow Python package's and the tflite_runtime package from version 2.3 for Linux and 2.4 for other environments.
3
+ # https://www.tensorflow.org/lite/guide/ops_select#running_the_model
4
+
5
+ # You must use: tf-nightly
6
+ # pip install tf-nightly
7
+
8
+ import os
9
+ import glob
10
+ import cv2
11
+ import numpy as np
12
+
13
+ import tensorflow as tf
14
+
15
+ width=256
16
+ height=256
17
+ model_name="model.tflite"
18
+ #model_name="model_quant.tflite"
19
+ image_name="dog.jpg"
20
+
21
+ # input
22
+ img = cv2.imread(image_name)
23
+ img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) / 255.0
24
+
25
+ mean=[0.485, 0.456, 0.406]
26
+ std=[0.229, 0.224, 0.225]
27
+ img = (img - mean) / std
28
+
29
+ img_resized = tf.image.resize(img, [width,height], method='bicubic', preserve_aspect_ratio=False)
30
+ #img_resized = tf.transpose(img_resized, [2, 0, 1])
31
+ img_input = img_resized.numpy()
32
+ reshape_img = img_input.reshape(1,width,height,3)
33
+ tensor = tf.convert_to_tensor(reshape_img, dtype=tf.float32)
34
+
35
+ # load model
36
+ print("Load model...")
37
+ interpreter = tf.lite.Interpreter(model_path=model_name)
38
+ print("Allocate tensor...")
39
+ interpreter.allocate_tensors()
40
+ print("Get input/output details...")
41
+ input_details = interpreter.get_input_details()
42
+ output_details = interpreter.get_output_details()
43
+ print("Get input shape...")
44
+ input_shape = input_details[0]['shape']
45
+ print(input_shape)
46
+ print(input_details)
47
+ print(output_details)
48
+ #input_data = np.array(np.random.random_sample(input_shape), dtype=np.float32)
49
+ print("Set input tensor...")
50
+ interpreter.set_tensor(input_details[0]['index'], tensor)
51
+
52
+ print("invoke()...")
53
+ interpreter.invoke()
54
+
55
+ # The function `get_tensor()` returns a copy of the tensor data.
56
+ # Use `tensor()` in order to get a pointer to the tensor.
57
+ print("get output tensor...")
58
+ output = interpreter.get_tensor(output_details[0]['index'])
59
+ #output = np.squeeze(output)
60
+ output = output.reshape(width, height)
61
+ #print(output)
62
+ prediction = np.array(output)
63
+ print("reshape prediction...")
64
+ prediction = prediction.reshape(width, height)
65
+
66
+ # output file
67
+ #prediction = cv2.resize(prediction, (img.shape[1], img.shape[0]), interpolation=cv2.INTER_CUBIC)
68
+ print(" Write image to: output.png")
69
+ depth_min = prediction.min()
70
+ depth_max = prediction.max()
71
+ img_out = (255 * (prediction - depth_min) / (depth_max - depth_min)).astype("uint8")
72
+ print("save output image...")
73
+ cv2.imwrite("output.png", img_out)
74
+
75
+ print("finished")
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/additions/do_catkin_make.sh ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ mkdir src
2
+ catkin_make
3
+ source devel/setup.bash
4
+ echo $ROS_PACKAGE_PATH
5
+ chmod +x ./devel/setup.bash
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/additions/install_ros_melodic_ubuntu_17_18.sh ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #@title { display-mode: "code" }
2
+
3
+ #from http://wiki.ros.org/indigo/Installation/Ubuntu
4
+
5
+ #1.2 Setup sources.list
6
+ sudo sh -c 'echo "deb http://packages.ros.org/ros/ubuntu $(lsb_release -sc) main" > /etc/apt/sources.list.d/ros-latest.list'
7
+
8
+ # 1.3 Setup keys
9
+ sudo apt-key adv --keyserver 'hkp://keyserver.ubuntu.com:80' --recv-key C1CF6E31E6BADE8868B172B4F42ED6FBAB17C654
10
+ sudo apt-key adv --keyserver 'hkp://ha.pool.sks-keyservers.net:80' --recv-key 421C365BD9FF1F717815A3895523BAEEB01FA116
11
+
12
+ curl -sSL 'http://keyserver.ubuntu.com/pks/lookup?op=get&search=0xC1CF6E31E6BADE8868B172B4F42ED6FBAB17C654' | sudo apt-key add -
13
+
14
+ # 1.4 Installation
15
+ sudo apt-get update
16
+ sudo apt-get upgrade
17
+
18
+ # Desktop-Full Install:
19
+ sudo apt-get install ros-melodic-desktop-full
20
+
21
+ printf "\nsource /opt/ros/melodic/setup.bash\n" >> ~/.bashrc
22
+
23
+ # 1.5 Initialize rosdep
24
+ sudo rosdep init
25
+ rosdep update
26
+
27
+
28
+ # 1.7 Getting rosinstall (python)
29
+ sudo apt-get install python-rosinstall
30
+ sudo apt-get install python-catkin-tools
31
+ sudo apt-get install python-rospy
32
+ sudo apt-get install python-rosdep
33
+ sudo apt-get install python-roscd
34
+ sudo apt-get install python-pip
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/midas_cpp/CMakeLists.txt ADDED
@@ -0,0 +1,189 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ cmake_minimum_required(VERSION 3.0.2)
2
+ project(midas_cpp)
3
+
4
+ ## Compile as C++11, supported in ROS Kinetic and newer
5
+ # add_compile_options(-std=c++11)
6
+
7
+ ## Find catkin macros and libraries
8
+ ## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz)
9
+ ## is used, also find other catkin packages
10
+ find_package(catkin REQUIRED COMPONENTS
11
+ cv_bridge
12
+ image_transport
13
+ roscpp
14
+ rospy
15
+ sensor_msgs
16
+ std_msgs
17
+ )
18
+
19
+ ## System dependencies are found with CMake's conventions
20
+ # find_package(Boost REQUIRED COMPONENTS system)
21
+
22
+ list(APPEND CMAKE_PREFIX_PATH "~/libtorch")
23
+ list(APPEND CMAKE_PREFIX_PATH "/usr/local/lib/python3.6/dist-packages/torch/lib")
24
+ list(APPEND CMAKE_PREFIX_PATH "/usr/local/lib/python2.7/dist-packages/torch/lib")
25
+
26
+ if(NOT EXISTS "~/libtorch")
27
+ if (EXISTS "/usr/local/lib/python3.6/dist-packages/torch")
28
+ include_directories(/usr/local/include)
29
+ include_directories(/usr/local/lib/python3.6/dist-packages/torch/include/torch/csrc/api/include)
30
+ include_directories(/usr/local/lib/python3.6/dist-packages/torch/include)
31
+
32
+ link_directories(/usr/local/lib)
33
+ link_directories(/usr/local/lib/python3.6/dist-packages/torch/lib)
34
+
35
+ set(CMAKE_PREFIX_PATH /usr/local/lib/python3.6/dist-packages/torch)
36
+ set(Boost_USE_MULTITHREADED ON)
37
+ set(Torch_DIR /usr/local/lib/python3.6/dist-packages/torch)
38
+
39
+ elseif (EXISTS "/usr/local/lib/python2.7/dist-packages/torch")
40
+
41
+ include_directories(/usr/local/include)
42
+ include_directories(/usr/local/lib/python2.7/dist-packages/torch/include/torch/csrc/api/include)
43
+ include_directories(/usr/local/lib/python2.7/dist-packages/torch/include)
44
+
45
+ link_directories(/usr/local/lib)
46
+ link_directories(/usr/local/lib/python2.7/dist-packages/torch/lib)
47
+
48
+ set(CMAKE_PREFIX_PATH /usr/local/lib/python2.7/dist-packages/torch)
49
+ set(Boost_USE_MULTITHREADED ON)
50
+ set(Torch_DIR /usr/local/lib/python2.7/dist-packages/torch)
51
+ endif()
52
+ endif()
53
+
54
+
55
+
56
+ find_package(Torch REQUIRED)
57
+ find_package(OpenCV REQUIRED)
58
+ include_directories( ${OpenCV_INCLUDE_DIRS} )
59
+
60
+ add_executable(midas_cpp src/main.cpp)
61
+ target_link_libraries(midas_cpp "${TORCH_LIBRARIES}" "${OpenCV_LIBS} ${catkin_LIBRARIES}")
62
+ set_property(TARGET midas_cpp PROPERTY CXX_STANDARD 14)
63
+
64
+
65
+
66
+ ###################################
67
+ ## catkin specific configuration ##
68
+ ###################################
69
+ ## The catkin_package macro generates cmake config files for your package
70
+ ## Declare things to be passed to dependent projects
71
+ ## INCLUDE_DIRS: uncomment this if your package contains header files
72
+ ## LIBRARIES: libraries you create in this project that dependent projects also need
73
+ ## CATKIN_DEPENDS: catkin_packages dependent projects also need
74
+ ## DEPENDS: system dependencies of this project that dependent projects also need
75
+ catkin_package(
76
+ # INCLUDE_DIRS include
77
+ # LIBRARIES midas_cpp
78
+ # CATKIN_DEPENDS cv_bridge image_transport roscpp sensor_msgs std_msgs
79
+ # DEPENDS system_lib
80
+ )
81
+
82
+ ###########
83
+ ## Build ##
84
+ ###########
85
+
86
+ ## Specify additional locations of header files
87
+ ## Your package locations should be listed before other locations
88
+ include_directories(
89
+ # include
90
+ ${catkin_INCLUDE_DIRS}
91
+ )
92
+
93
+ ## Declare a C++ library
94
+ # add_library(${PROJECT_NAME}
95
+ # src/${PROJECT_NAME}/midas_cpp.cpp
96
+ # )
97
+
98
+ ## Add cmake target dependencies of the library
99
+ ## as an example, code may need to be generated before libraries
100
+ ## either from message generation or dynamic reconfigure
101
+ # add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
102
+
103
+ ## Declare a C++ executable
104
+ ## With catkin_make all packages are built within a single CMake context
105
+ ## The recommended prefix ensures that target names across packages don't collide
106
+ # add_executable(${PROJECT_NAME}_node src/midas_cpp_node.cpp)
107
+
108
+ ## Rename C++ executable without prefix
109
+ ## The above recommended prefix causes long target names, the following renames the
110
+ ## target back to the shorter version for ease of user use
111
+ ## e.g. "rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node"
112
+ # set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "")
113
+
114
+ ## Add cmake target dependencies of the executable
115
+ ## same as for the library above
116
+ # add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS})
117
+
118
+ ## Specify libraries to link a library or executable target against
119
+ # target_link_libraries(${PROJECT_NAME}_node
120
+ # ${catkin_LIBRARIES}
121
+ # )
122
+
123
+ #############
124
+ ## Install ##
125
+ #############
126
+
127
+ # all install targets should use catkin DESTINATION variables
128
+ # See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html
129
+
130
+ ## Mark executable scripts (Python etc.) for installation
131
+ ## in contrast to setup.py, you can choose the destination
132
+ # catkin_install_python(PROGRAMS
133
+ # scripts/my_python_script
134
+ # DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
135
+ # )
136
+
137
+ ## Mark executables for installation
138
+ ## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html
139
+ # install(TARGETS ${PROJECT_NAME}_node
140
+ # RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
141
+ # )
142
+
143
+ ## Mark libraries for installation
144
+ ## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html
145
+ # install(TARGETS ${PROJECT_NAME}
146
+ # ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
147
+ # LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
148
+ # RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION}
149
+ # )
150
+
151
+ ## Mark cpp header files for installation
152
+ # install(DIRECTORY include/${PROJECT_NAME}/
153
+ # DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}
154
+ # FILES_MATCHING PATTERN "*.h"
155
+ # PATTERN ".svn" EXCLUDE
156
+ # )
157
+
158
+ ## Mark other files for installation (e.g. launch and bag files, etc.)
159
+ # install(FILES
160
+ # # myfile1
161
+ # # myfile2
162
+ # DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}
163
+ # )
164
+
165
+ #############
166
+ ## Testing ##
167
+ #############
168
+
169
+ ## Add gtest based cpp test target and link libraries
170
+ # catkin_add_gtest(${PROJECT_NAME}-test test/test_midas_cpp.cpp)
171
+ # if(TARGET ${PROJECT_NAME}-test)
172
+ # target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME})
173
+ # endif()
174
+
175
+ ## Add folders to be run by python nosetests
176
+ # catkin_add_nosetests(test)
177
+
178
+ install(TARGETS ${PROJECT_NAME}
179
+ ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
180
+ LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
181
+ RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
182
+ )
183
+
184
+ add_custom_command(
185
+ TARGET midas_cpp POST_BUILD
186
+ COMMAND ${CMAKE_COMMAND} -E copy
187
+ ${CMAKE_CURRENT_BINARY_DIR}/midas_cpp
188
+ ${CMAKE_SOURCE_DIR}/midas_cpp
189
+ )
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/midas_cpp/launch/midas_cpp.launch ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
<!-- Launch file for the MiDaS C++ depth-estimation node.
     All parameters below can be overridden on the roslaunch command line. -->
<launch>
    <arg name="input_topic" default="image_topic"/>
    <arg name="output_topic" default="midas_topic"/>
    <!-- TorchScript model file loaded by the node. -->
    <arg name="model_name" default="model-small-traced.pt"/>
    <!-- When true the depth map is resized back to the input image size. -->
    <arg name="out_orig_size" default="true"/>
    <!-- Network input resolution the frames are resized to before inference. -->
    <arg name="net_width" default="256"/>
    <arg name="net_height" default="256"/>
    <arg name="logging" default="false"/>

    <!-- respawn="true" restarts the node automatically if it exits. -->
    <node pkg="midas_cpp" type="midas_cpp" name="midas_cpp" output="log" respawn="true">
        <param name="input_topic" value="$(arg input_topic)"/>
        <param name="output_topic" value="$(arg output_topic)"/>
        <param name="model_name" value="$(arg model_name)"/>
        <param name="out_orig_size" value="$(arg out_orig_size)"/>
        <param name="net_width" value="$(arg net_width)"/>
        <param name="net_height" value="$(arg net_height)"/>
        <param name="logging" value="$(arg logging)"/>
    </node>
</launch>
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/midas_cpp/launch/midas_talker_listener.launch ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
<!-- Demo pipeline: a talker publishes video frames, one listener shows the
     MiDaS depth output and another shows the original frames. -->
<launch>
    <!-- Frame source: camera 0 when true, otherwise the given video file. -->
    <arg name="use_camera" default="false"/>
    <arg name="input_video_file" default="test.mp4"/>

    <!-- Display and/or record the received depth stream. -->
    <arg name="show_output" default="true"/>
    <arg name="save_output" default="false"/>
    <arg name="output_video_file" default="result.mp4"/>

    <node pkg="midas_cpp" type="talker.py" name="talker" output="log" respawn="true">
        <param name="use_camera" value="$(arg use_camera)"/>
        <param name="input_video_file" value="$(arg input_video_file)"/>
    </node>

    <!-- Subscribes to "midas_topic" (depth frames). -->
    <node pkg="midas_cpp" type="listener.py" name="listener" output="log" respawn="true">
        <param name="show_output" value="$(arg show_output)"/>
        <param name="save_output" value="$(arg save_output)"/>
        <param name="output_video_file" value="$(arg output_video_file)"/>
    </node>

    <!-- Subscribes to "image_topic" (original frames). -->
    <node pkg="midas_cpp" type="listener_original.py" name="listener_original" output="log" respawn="true">
        <param name="show_output" value="$(arg show_output)"/>
    </node>
</launch>
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/midas_cpp/package.xml ADDED
@@ -0,0 +1,77 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
<?xml version="1.0"?>
<package format="2">
  <name>midas_cpp</name>
  <version>0.1.0</version>
  <description>The midas_cpp package</description>

  <maintainer email="alexeyab84@gmail.com">Alexey Bochkovskiy</maintainer>

  <!-- The template's placeholder <license>TODO</license> was removed:
       release tooling (bloom / catkin_lint) rejects placeholder licenses,
       and the package is distributed under MIT only. -->
  <license>MIT</license>

  <url type="website">https://github.com/isl-org/MiDaS/tree/master/ros</url>
  <!-- <author email="alexeyab84@gmail.com">Alexey Bochkovskiy</author> -->

  <buildtool_depend>catkin</buildtool_depend>

  <!-- Build, export and runtime dependencies (format 2). -->
  <build_depend>cv_bridge</build_depend>
  <build_depend>image_transport</build_depend>
  <build_depend>roscpp</build_depend>
  <build_depend>rospy</build_depend>
  <build_depend>sensor_msgs</build_depend>
  <build_depend>std_msgs</build_depend>

  <build_export_depend>cv_bridge</build_export_depend>
  <build_export_depend>image_transport</build_export_depend>
  <build_export_depend>roscpp</build_export_depend>
  <build_export_depend>rospy</build_export_depend>
  <build_export_depend>sensor_msgs</build_export_depend>
  <build_export_depend>std_msgs</build_export_depend>

  <exec_depend>cv_bridge</exec_depend>
  <exec_depend>image_transport</exec_depend>
  <exec_depend>roscpp</exec_depend>
  <exec_depend>rospy</exec_depend>
  <exec_depend>sensor_msgs</exec_depend>
  <exec_depend>std_msgs</exec_depend>

  <!-- The export tag contains other, unspecified, tags -->
  <export>
    <!-- Other tools can request additional information be placed here -->
  </export>
</package>
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/midas_cpp/scripts/listener.py ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ from __future__ import print_function
3
+
4
+ import roslib
5
+ #roslib.load_manifest('my_package')
6
+ import sys
7
+ import rospy
8
+ import cv2
9
+ import numpy as np
10
+ from std_msgs.msg import String
11
+ from sensor_msgs.msg import Image
12
+ from cv_bridge import CvBridge, CvBridgeError
13
+
14
class video_show:
    """Subscribe to the MiDaS depth topic and display and/or record frames.

    Parameters are read from the private ROS namespace:
    ~show_output (bool), ~save_output (bool), ~output_video_file (str).
    """

    def __init__(self):
        self.show_output = rospy.get_param('~show_output', True)
        self.save_output = rospy.get_param('~save_output', False)
        self.output_video_file = rospy.get_param('~output_video_file', 'result.mp4')

        # Bug fix: this flag was read in callback() but never initialized,
        # which raised AttributeError on the first frame when saving was on.
        self.video_writer_init = False
        self.out = None  # cv2.VideoWriter, created lazily on the first frame

        self.bridge = CvBridge()
        self.image_sub = rospy.Subscriber("midas_topic", Image, self.callback)

    def callback(self, data):
        """Convert an incoming ROS Image and show/record it."""
        try:
            cv_image = self.bridge.imgmsg_to_cv2(data)
        except CvBridgeError as e:
            print(e)
            return

        if cv_image.size == 0:
            return

        rospy.loginfo("Listener: Received new frame")
        cv_image = cv_image.astype("uint8")

        if self.show_output:
            cv2.imshow("video_show", cv_image)
            cv2.waitKey(10)

        if self.save_output:
            if not self.video_writer_init:
                # Writer size must match the frame: (width, height).
                fourcc = cv2.VideoWriter_fourcc(*'XVID')
                self.out = cv2.VideoWriter(self.output_video_file, fourcc, 25,
                                           (cv_image.shape[1], cv_image.shape[0]))
                # Bug fix: mark the writer as created so it is built only once.
                self.video_writer_init = True

            self.out.write(cv_image)
48
+
49
+
50
+
51
def main(args):
    """Start the listener node and block until ROS shuts down."""
    rospy.init_node('listener', anonymous=True)
    node = video_show()
    try:
        rospy.spin()
    except KeyboardInterrupt:
        print("Shutting down")
    cv2.destroyAllWindows()

if __name__ == '__main__':
    main(sys.argv)
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/midas_cpp/scripts/listener_original.py ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ from __future__ import print_function
3
+
4
+ import roslib
5
+ #roslib.load_manifest('my_package')
6
+ import sys
7
+ import rospy
8
+ import cv2
9
+ import numpy as np
10
+ from std_msgs.msg import String
11
+ from sensor_msgs.msg import Image
12
+ from cv_bridge import CvBridge, CvBridgeError
13
+
14
class video_show:
    """Subscribe to the original camera topic and display and/or record frames.

    Parameters are read from the private ROS namespace:
    ~show_output (bool), ~save_output (bool), ~output_video_file (str).
    """

    def __init__(self):
        self.show_output = rospy.get_param('~show_output', True)
        self.save_output = rospy.get_param('~save_output', False)
        self.output_video_file = rospy.get_param('~output_video_file', 'result.mp4')

        # Bug fix: this flag was read in callback() but never initialized,
        # which raised AttributeError on the first frame when saving was on.
        self.video_writer_init = False
        self.out = None  # cv2.VideoWriter, created lazily on the first frame

        self.bridge = CvBridge()
        self.image_sub = rospy.Subscriber("image_topic", Image, self.callback)

    def callback(self, data):
        """Convert an incoming ROS Image and show/record it."""
        try:
            cv_image = self.bridge.imgmsg_to_cv2(data)
        except CvBridgeError as e:
            print(e)
            return

        if cv_image.size == 0:
            return

        rospy.loginfo("Listener_original: Received new frame")
        cv_image = cv_image.astype("uint8")

        if self.show_output:
            cv2.imshow("video_show_orig", cv_image)
            cv2.waitKey(10)

        if self.save_output:
            if not self.video_writer_init:
                # Writer size must match the frame: (width, height).
                fourcc = cv2.VideoWriter_fourcc(*'XVID')
                self.out = cv2.VideoWriter(self.output_video_file, fourcc, 25,
                                           (cv_image.shape[1], cv_image.shape[0]))
                # Bug fix: mark the writer as created so it is built only once.
                self.video_writer_init = True

            self.out.write(cv_image)
48
+
49
+
50
+
51
def main(args):
    """Start the listener_original node and block until ROS shuts down."""
    rospy.init_node('listener_original', anonymous=True)
    node = video_show()
    try:
        rospy.spin()
    except KeyboardInterrupt:
        print("Shutting down")
    cv2.destroyAllWindows()

if __name__ == '__main__':
    main(sys.argv)
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/midas_cpp/scripts/talker.py ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+
3
+
4
+ import roslib
5
+ #roslib.load_manifest('my_package')
6
+ import sys
7
+ import rospy
8
+ import cv2
9
+ from std_msgs.msg import String
10
+ from sensor_msgs.msg import Image
11
+ from cv_bridge import CvBridge, CvBridgeError
12
+
13
+
14
def talker():
    """Publish video frames (camera or file) on 'image_topic' at 30 Hz.

    Returns when the stream ends or ROS shuts down.
    """
    rospy.init_node('talker', anonymous=True)

    use_camera = rospy.get_param('~use_camera', False)
    input_video_file = rospy.get_param('~input_video_file', 'test.mp4')

    if use_camera:
        cap = cv2.VideoCapture(0)
    else:
        cap = cv2.VideoCapture(input_video_file)

    pub = rospy.Publisher('image_topic', Image, queue_size=1)
    rate = rospy.Rate(30)  # 30hz
    bridge = CvBridge()

    try:
        while not rospy.is_shutdown():
            ret, cv_image = cap.read()
            if not ret:
                print("Talker: Video is over")
                rospy.loginfo("Video is over")
                return

            try:
                image = bridge.cv2_to_imgmsg(cv_image, "bgr8")
            except CvBridgeError as e:
                # Bug fix: rospy logging uses %-style lazy formatting; the
                # original passed `e` with no matching placeholder, which
                # raises a formatting error instead of logging.
                rospy.logerr("Talker: cv2image conversion failed: %s", e)
                print(e)
                continue

            rospy.loginfo("Talker: Publishing frame")
            pub.publish(image)
            rate.sleep()
    finally:
        # Release the capture device on every exit path (end of stream,
        # shutdown, or exception).
        cap.release()

if __name__ == '__main__':
    try:
        talker()
    except rospy.ROSInterruptException:
        pass
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/ros/midas_cpp/src/main.cpp ADDED
@@ -0,0 +1,285 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #include <ros/ros.h>
2
+ #include <image_transport/image_transport.h>
3
+ #include <cv_bridge/cv_bridge.h>
4
+ #include <sensor_msgs/image_encodings.h>
5
+
6
+ #include <initializer_list>
7
+
8
+ #include <torch/script.h> // One-stop header.
9
+
10
+ #include <opencv2/core/version.hpp>
11
+ #include <opencv2/imgproc/imgproc.hpp>
12
+ #include <opencv2/opencv.hpp>
13
+ #include <opencv2/opencv_modules.hpp>
14
+
15
+ #include <opencv2/highgui/highgui.hpp>
16
+ #include <opencv2/video/video.hpp>
17
+
18
+ // includes for OpenCV >= 3.x
19
+ #ifndef CV_VERSION_EPOCH
20
+ #include <opencv2/core/types.hpp>
21
+ #include <opencv2/videoio/videoio.hpp>
22
+ #include <opencv2/imgcodecs/imgcodecs.hpp>
23
+ #endif
24
+
25
+ // OpenCV includes for OpenCV 2.x
26
+ #ifdef CV_VERSION_EPOCH
27
+ #include <opencv2/highgui/highgui_c.h>
28
+ #include <opencv2/imgproc/imgproc_c.h>
29
+ #include <opencv2/core/types_c.h>
30
+ #include <opencv2/core/version.hpp>
31
+ #endif
32
+
33
+ static const std::string OPENCV_WINDOW = "Image window";
34
+
35
+ class Midas
36
+ {
37
+ ros::NodeHandle nh_;
38
+ image_transport::ImageTransport it_;
39
+ image_transport::Subscriber image_sub_;
40
+ image_transport::Publisher image_pub_;
41
+
42
+ torch::jit::script::Module module;
43
+ torch::Device device;
44
+
45
+ auto ToTensor(cv::Mat img, bool show_output = false, bool unsqueeze = false, int unsqueeze_dim = 0)
46
+ {
47
+ //std::cout << "image shape: " << img.size() << std::endl;
48
+ at::Tensor tensor_image = torch::from_blob(img.data, { img.rows, img.cols, 3 }, at::kByte);
49
+
50
+ if (unsqueeze)
51
+ {
52
+ tensor_image.unsqueeze_(unsqueeze_dim);
53
+ //std::cout << "tensors new shape: " << tensor_image.sizes() << std::endl;
54
+ }
55
+
56
+ if (show_output)
57
+ {
58
+ std::cout << tensor_image.slice(2, 0, 1) << std::endl;
59
+ }
60
+ //std::cout << "tenor shape: " << tensor_image.sizes() << std::endl;
61
+ return tensor_image;
62
+ }
63
+
64
+ auto ToInput(at::Tensor tensor_image)
65
+ {
66
+ // Create a vector of inputs.
67
+ return std::vector<torch::jit::IValue>{tensor_image};
68
+ }
69
+
70
+ auto ToCvImage(at::Tensor tensor, int cv_type = CV_8UC3)
71
+ {
72
+ int width = tensor.sizes()[0];
73
+ int height = tensor.sizes()[1];
74
+ try
75
+ {
76
+ cv::Mat output_mat;
77
+ if (cv_type == CV_8UC4 || cv_type == CV_8UC3 || cv_type == CV_8UC2 || cv_type == CV_8UC1) {
78
+ cv::Mat cv_image(cv::Size{ height, width }, cv_type, tensor.data_ptr<uchar>());
79
+ output_mat = cv_image;
80
+ }
81
+ else if (cv_type == CV_32FC4 || cv_type == CV_32FC3 || cv_type == CV_32FC2 || cv_type == CV_32FC1) {
82
+ cv::Mat cv_image(cv::Size{ height, width }, cv_type, tensor.data_ptr<float>());
83
+ output_mat = cv_image;
84
+ }
85
+ else if (cv_type == CV_64FC4 || cv_type == CV_64FC3 || cv_type == CV_64FC2 || cv_type == CV_64FC1) {
86
+ cv::Mat cv_image(cv::Size{ height, width }, cv_type, tensor.data_ptr<double>());
87
+ output_mat = cv_image;
88
+ }
89
+
90
+ //show_image(output_mat, "converted image from tensor");
91
+ return output_mat.clone();
92
+ }
93
+ catch (const c10::Error& e)
94
+ {
95
+ std::cout << "an error has occured : " << e.msg() << std::endl;
96
+ }
97
+ return cv::Mat(height, width, CV_8UC3);
98
+ }
99
+
100
+ std::string input_topic, output_topic, model_name;
101
+ bool out_orig_size;
102
+ int net_width, net_height;
103
+ torch::NoGradGuard guard;
104
+ at::Tensor mean, std;
105
+ at::Tensor output, tensor;
106
+
107
+ public:
108
+ Midas()
109
+ : nh_(), it_(nh_), device(torch::Device(torch::kCPU))
110
+ {
111
+ ros::param::param<std::string>("~input_topic", input_topic, "image_topic");
112
+ ros::param::param<std::string>("~output_topic", output_topic, "midas_topic");
113
+ ros::param::param<std::string>("~model_name", model_name, "model-small-traced.pt");
114
+ ros::param::param<bool>("~out_orig_size", out_orig_size, true);
115
+ ros::param::param<int>("~net_width", net_width, 256);
116
+ ros::param::param<int>("~net_height", net_height, 256);
117
+
118
+ std::cout << ", input_topic = " << input_topic <<
119
+ ", output_topic = " << output_topic <<
120
+ ", model_name = " << model_name <<
121
+ ", out_orig_size = " << out_orig_size <<
122
+ ", net_width = " << net_width <<
123
+ ", net_height = " << net_height <<
124
+ std::endl;
125
+
126
+ // Subscrive to input video feed and publish output video feed
127
+ image_sub_ = it_.subscribe(input_topic, 1, &Midas::imageCb, this);
128
+ image_pub_ = it_.advertise(output_topic, 1);
129
+
130
+ std::cout << "Try to load torchscript model \n";
131
+
132
+ try {
133
+ // Deserialize the ScriptModule from a file using torch::jit::load().
134
+ module = torch::jit::load(model_name);
135
+ }
136
+ catch (const c10::Error& e) {
137
+ std::cerr << "error loading the model\n";
138
+ exit(0);
139
+ }
140
+
141
+ std::cout << "ok\n";
142
+
143
+ try {
144
+ module.eval();
145
+ torch::jit::getProfilingMode() = false;
146
+ torch::jit::setGraphExecutorOptimize(true);
147
+
148
+ mean = torch::tensor({ 0.485, 0.456, 0.406 });
149
+ std = torch::tensor({ 0.229, 0.224, 0.225 });
150
+
151
+ if (torch::hasCUDA()) {
152
+ std::cout << "cuda is available" << std::endl;
153
+ at::globalContext().setBenchmarkCuDNN(true);
154
+ device = torch::Device(torch::kCUDA);
155
+ module.to(device);
156
+ mean = mean.to(device);
157
+ std = std.to(device);
158
+ }
159
+ }
160
+ catch (const c10::Error& e)
161
+ {
162
+ std::cerr << " module initialization: " << e.msg() << std::endl;
163
+ }
164
+ }
165
+
166
+ ~Midas()
167
+ {
168
+ }
169
+
170
+ void imageCb(const sensor_msgs::ImageConstPtr& msg)
171
+ {
172
+ cv_bridge::CvImagePtr cv_ptr;
173
+ try
174
+ {
175
+ // sensor_msgs::Image to cv::Mat
176
+ cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::RGB8);
177
+ }
178
+ catch (cv_bridge::Exception& e)
179
+ {
180
+ ROS_ERROR("cv_bridge exception: %s", e.what());
181
+ return;
182
+ }
183
+
184
+ // pre-processing
185
+ auto tensor_cpu = ToTensor(cv_ptr->image); // OpenCV-image -> Libtorch-tensor
186
+
187
+ try {
188
+ tensor = tensor_cpu.to(device); // move to device (CPU or GPU)
189
+
190
+ tensor = tensor.toType(c10::kFloat);
191
+ tensor = tensor.permute({ 2, 0, 1 }); // HWC -> CHW
192
+ tensor = tensor.unsqueeze(0);
193
+ tensor = at::upsample_bilinear2d(tensor, { net_height, net_width }, true); // resize
194
+ tensor = tensor.squeeze(0);
195
+ tensor = tensor.permute({ 1, 2, 0 }); // CHW -> HWC
196
+
197
+ tensor = tensor.div(255).sub(mean).div(std); // normalization
198
+ tensor = tensor.permute({ 2, 0, 1 }); // HWC -> CHW
199
+ tensor.unsqueeze_(0); // CHW -> NCHW
200
+ }
201
+ catch (const c10::Error& e)
202
+ {
203
+ std::cerr << " pre-processing exception: " << e.msg() << std::endl;
204
+ return;
205
+ }
206
+
207
+ auto input_to_net = ToInput(tensor); // input to the network
208
+
209
+ // inference
210
+ output;
211
+ try {
212
+ output = module.forward(input_to_net).toTensor(); // run inference
213
+ }
214
+ catch (const c10::Error& e)
215
+ {
216
+ std::cerr << " module.forward() exception: " << e.msg() << std::endl;
217
+ return;
218
+ }
219
+
220
+ output = output.detach().to(torch::kF32);
221
+
222
+ // move to CPU temporary
223
+ at::Tensor output_tmp = output;
224
+ output_tmp = output_tmp.to(torch::kCPU);
225
+
226
+ // normalization
227
+ float min_val = std::numeric_limits<float>::max();
228
+ float max_val = std::numeric_limits<float>::min();
229
+
230
+ for (int i = 0; i < net_width * net_height; ++i) {
231
+ float val = output_tmp.data_ptr<float>()[i];
232
+ if (min_val > val) min_val = val;
233
+ if (max_val < val) max_val = val;
234
+ }
235
+ float range_val = max_val - min_val;
236
+
237
+ output = output.sub(min_val).div(range_val).mul(255.0F).clamp(0, 255).to(torch::kF32); // .to(torch::kU8);
238
+
239
+ // resize to the original size if required
240
+ if (out_orig_size) {
241
+ try {
242
+ output = at::upsample_bilinear2d(output.unsqueeze(0), { cv_ptr->image.size().height, cv_ptr->image.size().width }, true);
243
+ output = output.squeeze(0);
244
+ }
245
+ catch (const c10::Error& e)
246
+ {
247
+ std::cout << " upsample_bilinear2d() exception: " << e.msg() << std::endl;
248
+ return;
249
+ }
250
+ }
251
+ output = output.permute({ 1, 2, 0 }).to(torch::kCPU);
252
+
253
+ int cv_type = CV_32FC1; // CV_8UC1;
254
+ auto cv_img = ToCvImage(output, cv_type);
255
+
256
+ sensor_msgs::Image img_msg;
257
+
258
+ try {
259
+ // cv::Mat -> sensor_msgs::Image
260
+ std_msgs::Header header; // empty header
261
+ header.seq = 0; // user defined counter
262
+ header.stamp = ros::Time::now();// time
263
+ //cv_bridge::CvImage img_bridge = cv_bridge::CvImage(header, sensor_msgs::image_encodings::MONO8, cv_img);
264
+ cv_bridge::CvImage img_bridge = cv_bridge::CvImage(header, sensor_msgs::image_encodings::TYPE_32FC1, cv_img);
265
+
266
+ img_bridge.toImageMsg(img_msg); // cv_bridge -> sensor_msgs::Image
267
+ }
268
+ catch (cv_bridge::Exception& e)
269
+ {
270
+ ROS_ERROR("cv_bridge exception: %s", e.what());
271
+ return;
272
+ }
273
+
274
+ // Output modified video stream
275
+ image_pub_.publish(img_msg);
276
+ }
277
+ };
278
+
279
+ int main(int argc, char** argv)
280
+ {
281
+ ros::init(argc, argv, "midas", ros::init_options::AnonymousName);
282
+ Midas ic;
283
+ ros::spin();
284
+ return 0;
285
+ }
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/tf/README.md ADDED
@@ -0,0 +1,147 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ## Towards Robust Monocular Depth Estimation: Mixing Datasets for Zero-shot Cross-dataset Transfer
2
+
3
+ ### TensorFlow inference using `.pb` and `.onnx` models
4
+
5
+ 1. [Run inference on TensorFlow-model by using TensorFlow](#run-inference-on-tensorflow-model-by-using-tensorflow)
6
+
7
+ 2. [Run inference on ONNX-model by using ONNX-Runtime](#run-inference-on-onnx-model-by-using-onnx-runtime)
8
+
9
+ 3. [Make ONNX model from downloaded Pytorch model file](#make-onnx-model-from-downloaded-pytorch-model-file)
10
+
11
+
12
+ ### Run inference on TensorFlow-model by using TensorFlow
13
+
14
+ 1) Download the model weights [model-f6b98070.pb](https://github.com/isl-org/MiDaS/releases/download/v2_1/model-f6b98070.pb)
15
+ and [model-small.pb](https://github.com/isl-org/MiDaS/releases/download/v2_1/model-small.pb) and place the
16
+ file in the `/tf/` folder.
17
+
18
+ 2) Set up dependencies:
19
+
20
+ ```shell
21
+ # install OpenCV
22
+ pip install --upgrade pip
23
+ pip install opencv-python
24
+
25
+ # install TensorFlow
26
+ pip install -I grpcio tensorflow==2.3.0 tensorflow-addons==0.11.2 numpy==1.18.0
27
+ ```
28
+
29
+ #### Usage
30
+
31
+ 1) Place one or more input images in the folder `tf/input`.
32
+
33
+ 2) Run the model:
34
+
35
+ ```shell
36
+ python tf/run_pb.py
37
+ ```
38
+
39
+ Or run the small model:
40
+
41
+ ```shell
42
+ python tf/run_pb.py --model_weights model-small.pb --model_type small
43
+ ```
44
+
45
+ 3) The resulting inverse depth maps are written to the `tf/output` folder.
46
+
47
+
48
+ ### Run inference on ONNX-model by using ONNX-Runtime
49
+
50
+ 1) Download the model weights [model-f6b98070.onnx](https://github.com/isl-org/MiDaS/releases/download/v2_1/model-f6b98070.onnx)
51
+ and [model-small.onnx](https://github.com/isl-org/MiDaS/releases/download/v2_1/model-small.onnx) and place the
52
+ file in the `/tf/` folder.
53
+
54
+ 2) Set up dependencies:
55
+
56
+ ```shell
57
+ # install OpenCV
58
+ pip install --upgrade pip
59
+ pip install opencv-python
60
+
61
+ # install ONNX
62
+ pip install onnx==1.7.0
63
+
64
+ # install ONNX Runtime
65
+ pip install onnxruntime==1.5.2
66
+ ```
67
+
68
+ #### Usage
69
+
70
+ 1) Place one or more input images in the folder `tf/input`.
71
+
72
+ 2) Run the model:
73
+
74
+ ```shell
75
+ python tf/run_onnx.py
76
+ ```
77
+
78
+ Or run the small model:
79
+
80
+ ```shell
81
+ python tf/run_onnx.py --model_weights model-small.onnx --model_type small
82
+ ```
83
+
84
+ 3) The resulting inverse depth maps are written to the `tf/output` folder.
85
+
86
+
87
+
88
+ ### Make ONNX model from downloaded Pytorch model file
89
+
90
+ 1) Download the model weights [model-f6b98070.pt](https://github.com/isl-org/MiDaS/releases/download/v2_1/model-f6b98070.pt) and place the
91
+ file in the root folder.
92
+
93
+ 2) Set up dependencies:
94
+
95
+ ```shell
96
+ # install OpenCV
97
+ pip install --upgrade pip
98
+ pip install opencv-python
99
+
100
+ # install PyTorch TorchVision
101
+ pip install -I torch==1.7.0 torchvision==0.8.0
102
+
103
+ # install TensorFlow
104
+ pip install -I grpcio tensorflow==2.3.0 tensorflow-addons==0.11.2 numpy==1.18.0
105
+
106
+ # install ONNX
107
+ pip install onnx==1.7.0
108
+
109
+ # install ONNX-TensorFlow
110
+ git clone https://github.com/onnx/onnx-tensorflow.git
111
+ cd onnx-tensorflow
112
+ git checkout 095b51b88e35c4001d70f15f80f31014b592b81e
113
+ pip install -e .
114
+ ```
115
+
116
+ #### Usage
117
+
118
+ 1) Run the converter:
119
+
120
+ ```shell
121
+ python tf/make_onnx_model.py
122
+ ```
123
+
124
+ 2) The resulting `model-f6b98070.onnx` file is written to the `/tf/` folder.
125
+
126
+
127
+ ### Requirements
128
+
129
+ The code was tested with Python 3.6.9, PyTorch 1.5.1, TensorFlow 2.2.0, TensorFlow-addons 0.8.3, ONNX 1.7.0, ONNX-TensorFlow (GitHub-master-17.07.2020) and OpenCV 4.3.0.
130
+
131
+ ### Citation
132
+
133
+ Please cite our paper if you use this code or any of the models:
134
+ ```
135
+ @article{Ranftl2019,
136
+ author = {Ren\'{e} Ranftl and Katrin Lasinger and David Hafner and Konrad Schindler and Vladlen Koltun},
137
+ title = {Towards Robust Monocular Depth Estimation: Mixing Datasets for Zero-shot Cross-dataset Transfer},
138
+ journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI)},
139
+ year = {2020},
140
+ }
141
+ ```
142
+
143
+ ### License
144
+
145
+ MIT License
146
+
147
+
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/tf/make_onnx_model.py ADDED
@@ -0,0 +1,112 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Compute depth maps for images in the input folder.
2
+ """
3
+ import os
4
+ import ntpath
5
+ import glob
6
+ import torch
7
+ import utils
8
+ import cv2
9
+ import numpy as np
10
+ from torchvision.transforms import Compose, Normalize
11
+ from torchvision import transforms
12
+
13
+ from shutil import copyfile
14
+ import fileinput
15
+ import sys
16
+ sys.path.append(os.getcwd() + '/..')
17
+
18
def modify_file():
    """Patch ../midas/blocks.py in place (keeping a .bak copy) so the
    subsequent import builds an ONNX-exportable graph: disable
    align_corners and replace the torch.hub backbone download with the
    local torchvision model."""
    target = '../midas/blocks.py'
    copyfile(target, target + '.bak')

    with open(target, 'r') as source_file:
        source = source_file.read()

    replacements = [
        ('align_corners=True', 'align_corners=False'),
        ('import torch.nn as nn',
         'import torch.nn as nn\nimport torchvision.models as models'),
        ('torch.hub.load("facebookresearch/WSL-Images", "resnext101_32x8d_wsl")',
         'models.resnext101_32x8d()'),
    ]
    for old, new in replacements:
        source = source.replace(old, new)

    with open(target, 'w') as target_file:
        target_file.write(source)
31
+
32
def restore_file():
    """Undo modify_file() by restoring the saved .bak backup copy."""
    target = '../midas/blocks.py'
    copyfile(target + '.bak', target)
35
+
36
# Patch midas/blocks.py before importing it so the imported network builds
# an ONNX-exportable graph, then restore the original file afterwards.
modify_file()

from midas.midas_net import MidasNet
from midas.transforms import Resize, NormalizeImage, PrepareForNet

restore_file()
42
+
43
+
44
class MidasNet_preprocessing(MidasNet):
    """Network for monocular depth estimation.

    Bakes ImageNet mean/std normalization into the forward pass so the
    exported ONNX model can accept unnormalized input directly.
    """
    def forward(self, x):
        """Forward pass.

        Args:
            x (tensor): input data (image)

        Returns:
            tensor: depth
        """

        mean = torch.tensor([0.485, 0.456, 0.406])
        std = torch.tensor([0.229, 0.224, 0.225])
        # NOTE: normalization is applied in place, so the caller's tensor is
        # mutated; mean/std are broadcast over NCHW input along the channel
        # axis.  Assumes x lives on the CPU, like the constants — confirm.
        x.sub_(mean[None, :, None, None]).div_(std[None, :, None, None])

        return MidasNet.forward(self, x)
62
+
63
+
64
def run(model_path):
    """Trace MonoDepthNN on a dummy input and export it to ONNX.

    Args:
        model_path (str): path to saved PyTorch model; the .onnx file is
            written to the current working directory under the same stem.
    """
    print("initialize")

    # load network (subclass bakes input normalization into forward())
    model = MidasNet_preprocessing(model_path, non_negative=True)

    model.eval()

    print("start processing")

    # dummy CHW input used only to trace the graph for export
    img_input = np.zeros((3, 384, 384), np.float32)

    # compute
    with torch.no_grad():
        sample = torch.from_numpy(img_input).unsqueeze(0)
        prediction = model.forward(sample)
        prediction = (
            torch.nn.functional.interpolate(
                prediction.unsqueeze(1),
                # Bug fix: img_input is CHW, so its spatial size is
                # shape[1:] = (384, 384); shape[:2] was (3, 384).
                size=img_input.shape[1:],
                mode="bicubic",
                align_corners=False,
            )
            .squeeze()
            .cpu()
            .numpy()
        )

    torch.onnx.export(model, sample, ntpath.basename(model_path).rsplit('.', 1)[0]+'.onnx', opset_version=9)

    print("finished")
104
+
105
+
106
if __name__ == "__main__":
    # Path to the downloaded PyTorch weights; the resulting .onnx file is
    # written next to the current working directory.
    MODEL_PATH = "../model-f6b98070.pt"

    # compute depth maps
    run(MODEL_PATH)
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/tf/run_pb.py ADDED
@@ -0,0 +1,135 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Compute depth maps for images in the input folder.
2
+ """
3
+ import os
4
+ import glob
5
+ import utils
6
+ import cv2
7
+ import argparse
8
+
9
+ import tensorflow as tf
10
+
11
+ from transforms import Resize, NormalizeImage, PrepareForNet
12
+
13
def run(input_path, output_path, model_path, model_type="large"):
    """Run MonoDepthNN to compute depth maps.

    Args:
        input_path (str): path to input folder
        output_path (str): path to output folder
        model_path (str): path to saved frozen TensorFlow graph (.pb)
        model_type (str): 'large' (384x384) or 'small' (256x256)

    Raises:
        ValueError: if model_type is not one of the supported values.
    """
    print("initialize")

    # the runtime initialization will not allocate all memory on the device to avoid out of GPU memory
    gpus = tf.config.experimental.list_physical_devices('GPU')
    if gpus:
        try:
            for gpu in gpus:
                #tf.config.experimental.set_memory_growth(gpu, True)
                tf.config.experimental.set_virtual_device_configuration(gpu,
                    [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=4000)])
        except RuntimeError as e:
            print(e)

    # network resolution
    if model_type == "large":
        net_w, net_h = 384, 384
    elif model_type == "small":
        net_w, net_h = 256, 256
    else:
        # Bug fix: `assert False` is stripped under -O; raise a real error.
        raise ValueError(f"model_type '{model_type}' not implemented, use: --model_type large")

    # load the frozen graph into the default graph
    graph_def = tf.compat.v1.GraphDef()
    with tf.io.gfile.GFile(model_path, 'rb') as f:
        graph_def.ParseFromString(f.read())
        tf.import_graph_def(graph_def, name='')

    model_operations = tf.compat.v1.get_default_graph().get_operations()
    input_node = '0:0'
    # the graph's last operation produces the depth output
    output_layer = model_operations[len(model_operations) - 1].name + ':0'
    print("Last layer name: ", output_layer)

    resize_image = Resize(
        net_w,
        net_h,
        resize_target=None,
        keep_aspect_ratio=False,
        ensure_multiple_of=32,
        resize_method="upper_bound",
        image_interpolation_method=cv2.INTER_CUBIC,
    )

    def compose2(f1, f2):
        # left-to-right function composition: f2(f1(x))
        return lambda x: f2(f1(x))

    transform = compose2(resize_image, PrepareForNet())

    # get input
    img_names = glob.glob(os.path.join(input_path, "*"))
    num_images = len(img_names)

    # create output folder
    os.makedirs(output_path, exist_ok=True)

    print("start processing")

    with tf.compat.v1.Session() as sess:
        try:
            # load images
            for ind, img_name in enumerate(img_names):

                print("  processing {} ({}/{})".format(img_name, ind + 1, num_images))

                # input
                img = utils.read_image(img_name)
                img_input = transform({"image": img})["image"]

                # compute
                prob_tensor = sess.graph.get_tensor_by_name(output_layer)
                prediction, = sess.run(prob_tensor, {input_node: [img_input] })
                prediction = prediction.reshape(net_h, net_w)
                prediction = cv2.resize(prediction, (img.shape[1], img.shape[0]), interpolation=cv2.INTER_CUBIC)

                # output
                filename = os.path.join(
                    output_path, os.path.splitext(os.path.basename(img_name))[0]
                )
                utils.write_depth(filename, prediction, bits=2)

        except KeyError:
            # Bug fix: the original message was one broken string literal with
            # the variable names embedded as text due to mismatched quotes.
            print("Couldn't find input node: " + input_node + " or output layer: " + output_layer + ".")
            exit(-1)

    print("finished")
107
+
108
+
109
if __name__ == "__main__":
    # Command-line interface: all options have sensible defaults so the
    # script can be run with no arguments.
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--input_path', default='input',
                        help='folder with input images')
    parser.add_argument('-o', '--output_path', default='output',
                        help='folder for output images')
    parser.add_argument('-m', '--model_weights', default='model-f6b98070.pb',
                        help='path to the trained weights of model')
    parser.add_argument('-t', '--model_type', default='large',
                        help='model type: large or small')
    args = parser.parse_args()

    # compute depth maps
    run(args.input_path, args.output_path, args.model_weights, args.model_type)
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/tf/transforms.py ADDED
@@ -0,0 +1,234 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import cv2
3
+ import math
4
+
5
+
6
def apply_min_size(sample, size, image_interpolation_method=cv2.INTER_AREA):
    """Resize the sample in place so it is at least ``size``. Keeps aspect ratio.

    Args:
        sample (dict): sample with "image", "disparity" and "mask" entries
        size (tuple): minimum (height, width)
        image_interpolation_method (int, optional): OpenCV interpolation flag
            used for the "image" entry. Defaults to cv2.INTER_AREA.

    Returns:
        tuple: new (height, width) of the sample
    """
    shape = list(sample["disparity"].shape)

    # Already large enough in both dimensions: nothing to resize.
    # Bugfix: the original returned the sample dict here, contradicting both
    # the docstring and the size tuple returned on the resizing path below.
    if shape[0] >= size[0] and shape[1] >= size[1]:
        return tuple(shape)

    # Uniform scale factor: the larger of the two per-axis factors, so that
    # BOTH dimensions reach at least the requested minimum.
    scale = [0, 0]
    scale[0] = size[0] / shape[0]
    scale[1] = size[1] / shape[1]

    scale = max(scale)

    shape[0] = math.ceil(scale * shape[0])
    shape[1] = math.ceil(scale * shape[1])

    # resize (cv2 expects (width, height), hence the reversed shape)
    sample["image"] = cv2.resize(
        sample["image"], tuple(shape[::-1]), interpolation=image_interpolation_method
    )

    # Nearest-neighbor for disparity and mask so no interpolated values are
    # invented at object boundaries.
    sample["disparity"] = cv2.resize(
        sample["disparity"], tuple(shape[::-1]), interpolation=cv2.INTER_NEAREST
    )
    sample["mask"] = cv2.resize(
        sample["mask"].astype(np.float32),
        tuple(shape[::-1]),
        interpolation=cv2.INTER_NEAREST,
    )
    sample["mask"] = sample["mask"].astype(bool)

    return tuple(shape)
46
+
47
+
48
class Resize(object):
    """Resize sample to given size (width, height).
    """

    def __init__(
        self,
        width,
        height,
        resize_target=True,
        keep_aspect_ratio=False,
        ensure_multiple_of=1,
        resize_method="lower_bound",
        image_interpolation_method=cv2.INTER_AREA,
    ):
        """Init.

        Args:
            width (int): desired output width
            height (int): desired output height
            resize_target (bool, optional):
                True: Resize the full sample (image, mask, target).
                False: Resize image only.
                Defaults to True.
            keep_aspect_ratio (bool, optional):
                True: Keep the aspect ratio of the input sample.
                Output sample might not have the given width and height, and
                resize behaviour depends on the parameter 'resize_method'.
                Defaults to False.
            ensure_multiple_of (int, optional):
                Output width and height is constrained to be multiple of this parameter.
                Defaults to 1.
            resize_method (str, optional):
                "lower_bound": Output will be at least as large as the given size.
                "upper_bound": Output will be at max as large as the given size. (Output size might be smaller than given size.)
                "minimal": Scale as least as possible. (Output size might be smaller than given size.)
                Defaults to "lower_bound".
            image_interpolation_method (int, optional):
                OpenCV interpolation flag used when resizing the "image"
                entry. Defaults to cv2.INTER_AREA.
        """
        self.__width = width
        self.__height = height

        self.__resize_target = resize_target
        self.__keep_aspect_ratio = keep_aspect_ratio
        self.__multiple_of = ensure_multiple_of
        self.__resize_method = resize_method
        self.__image_interpolation_method = image_interpolation_method

    def constrain_to_multiple_of(self, x, min_val=0, max_val=None):
        """Round x to the nearest multiple of ensure_multiple_of, falling back
        to floor/ceil so the result respects min_val / max_val when given.
        Returns an int (via numpy rounding, so x may be a float)."""
        y = (np.round(x / self.__multiple_of) * self.__multiple_of).astype(int)

        # Rounding up overshot the upper bound: round down instead.
        if max_val is not None and y > max_val:
            y = (np.floor(x / self.__multiple_of) * self.__multiple_of).astype(int)

        # Rounding down undershot the lower bound: round up instead.
        if y < min_val:
            y = (np.ceil(x / self.__multiple_of) * self.__multiple_of).astype(int)

        return y

    def get_size(self, width, height):
        """Compute the output (new_width, new_height) for an input of the
        given size, honoring keep_aspect_ratio, resize_method and
        ensure_multiple_of."""
        # determine new height and width
        scale_height = self.__height / height
        scale_width = self.__width / width

        if self.__keep_aspect_ratio:
            # Collapse to a single scale factor so the aspect ratio is kept;
            # which factor wins depends on the resize method.
            if self.__resize_method == "lower_bound":
                # scale such that output size is lower bound
                if scale_width > scale_height:
                    # fit width
                    scale_height = scale_width
                else:
                    # fit height
                    scale_width = scale_height
            elif self.__resize_method == "upper_bound":
                # scale such that output size is upper bound
                if scale_width < scale_height:
                    # fit width
                    scale_height = scale_width
                else:
                    # fit height
                    scale_width = scale_height
            elif self.__resize_method == "minimal":
                # scale as least as possbile
                if abs(1 - scale_width) < abs(1 - scale_height):
                    # fit width
                    scale_height = scale_width
                else:
                    # fit height
                    scale_width = scale_height
            else:
                raise ValueError(
                    f"resize_method {self.__resize_method} not implemented"
                )

        # Snap the scaled dimensions to the configured multiple, biased so the
        # lower/upper-bound guarantee of the resize method is not violated.
        if self.__resize_method == "lower_bound":
            new_height = self.constrain_to_multiple_of(
                scale_height * height, min_val=self.__height
            )
            new_width = self.constrain_to_multiple_of(
                scale_width * width, min_val=self.__width
            )
        elif self.__resize_method == "upper_bound":
            new_height = self.constrain_to_multiple_of(
                scale_height * height, max_val=self.__height
            )
            new_width = self.constrain_to_multiple_of(
                scale_width * width, max_val=self.__width
            )
        elif self.__resize_method == "minimal":
            new_height = self.constrain_to_multiple_of(scale_height * height)
            new_width = self.constrain_to_multiple_of(scale_width * width)
        else:
            raise ValueError(f"resize_method {self.__resize_method} not implemented")

        return (new_width, new_height)

    def __call__(self, sample):
        """Resize sample["image"] (and, if resize_target is set, the
        "disparity", "depth" and "mask" entries) in place; returns the sample."""
        width, height = self.get_size(
            sample["image"].shape[1], sample["image"].shape[0]
        )

        # resize sample
        sample["image"] = cv2.resize(
            sample["image"],
            (width, height),
            interpolation=self.__image_interpolation_method,
        )

        if self.__resize_target:
            # Nearest-neighbor for targets and mask: interpolating depth or
            # disparity values across object boundaries would invent data.
            if "disparity" in sample:
                sample["disparity"] = cv2.resize(
                    sample["disparity"],
                    (width, height),
                    interpolation=cv2.INTER_NEAREST,
                )

            if "depth" in sample:
                sample["depth"] = cv2.resize(
                    sample["depth"], (width, height), interpolation=cv2.INTER_NEAREST
                )

            sample["mask"] = cv2.resize(
                sample["mask"].astype(np.float32),
                (width, height),
                interpolation=cv2.INTER_NEAREST,
            )
            sample["mask"] = sample["mask"].astype(bool)

        return sample
195
+
196
+
197
class NormalizeImage(object):
    """Normalize the sample's "image" entry by a fixed mean and std.
    """

    def __init__(self, mean, std):
        # Statistics applied to the image array (scalar or per-channel).
        self._mean = mean
        self._std = std

    def __call__(self, sample):
        # Standard (x - mean) / std normalization, written back onto the sample.
        shifted = sample["image"] - self._mean
        sample["image"] = shifted / self._std

        return sample
+
210
+
211
class PrepareForNet(object):
    """Prepare sample for usage as network input.
    """

    def __init__(self):
        pass

    def __call__(self, sample):
        # HWC -> CHW, contiguous float32, as expected by the network.
        chw = np.transpose(sample["image"], (2, 0, 1))
        sample["image"] = np.ascontiguousarray(chw).astype(np.float32)

        # Auxiliary entries are converted to contiguous float32 when present.
        for key in ("mask", "disparity", "depth"):
            if key in sample:
                sample[key] = np.ascontiguousarray(sample[key].astype(np.float32))

        return sample
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/tf/utils.py ADDED
@@ -0,0 +1,82 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import sys
3
+ import cv2
4
+
5
+
6
def write_pfm(path, image, scale=1):
    """Write a PFM (portable float map) file.

    Args:
        path (str): path to the output file
        image (array): float32 data, H x W (grayscale) or H x W x 3 (color)
        scale (int, optional): scale factor stored in the header; its sign
            encodes endianness (negative = little-endian). Defaults to 1.

    Raises:
        Exception: if the dtype is not float32 or the shape is unsupported.
    """

    with open(path, "wb") as file:
        if image.dtype.name != "float32":
            raise Exception("Image dtype must be float32.")

        # PFM stores scanlines bottom-to-top.
        image = np.flipud(image)

        if len(image.shape) == 3 and image.shape[2] == 3:  # color image
            color = True
        elif (
            len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1
        ):  # greyscale
            color = False
        else:
            raise Exception("Image must have H x W x 3, H x W x 1 or H x W dimensions.")

        # Bugfix: the original expression `"PF\n" if color else "Pf\n".encode()`
        # encoded only the grayscale branch, so color images wrote a str to a
        # binary file and raised TypeError. Parenthesize before encoding.
        file.write(("PF\n" if color else "Pf\n").encode())
        file.write("%d %d\n".encode() % (image.shape[1], image.shape[0]))

        endian = image.dtype.byteorder

        # A negative scale marks the raster as little-endian in PFM.
        # (Parentheses added for clarity; this matches Python's and/or
        # precedence in the original condition.)
        if endian == "<" or (endian == "=" and sys.byteorder == "little"):
            scale = -scale

        file.write("%f\n".encode() % scale)

        image.tofile(file)
42
+
43
def read_image(path):
    """Read an image file and return it as an RGB array with values in [0, 1].

    Args:
        path (str): path to file

    Returns:
        array: RGB image (0-1)
    """
    bgr = cv2.imread(path)

    # Promote single-channel images to 3 channels before the color swap.
    if bgr.ndim == 2:
        bgr = cv2.cvtColor(bgr, cv2.COLOR_GRAY2BGR)

    # OpenCV loads BGR; convert to RGB and rescale to [0, 1].
    return cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB) / 255.0
58
+
59
def write_depth(path, depth, bits=1):
    """Write depth map to pfm and png file.

    Args:
        path (str): filepath without extension
        depth (array): depth map
        bits (int, optional): bytes per png sample; 1 -> uint8, 2 -> uint16.
            Defaults to 1.
    """
    write_pfm(path + ".pfm", depth.astype(np.float32))

    depth_min = depth.min()
    depth_max = depth.max()

    max_val = (2**(8*bits))-1

    if depth_max - depth_min > np.finfo("float").eps:
        # Stretch the depth range to the full dynamic range of the png.
        out = max_val * (depth - depth_min) / (depth_max - depth_min)
    else:
        # Bugfix: the original assigned the scalar 0 here, so constant depth
        # maps crashed on the .astype() call below (int has no astype).
        out = np.zeros(depth.shape, dtype=depth.dtype)

    if bits == 1:
        cv2.imwrite(path + ".png", out.astype("uint8"))
    elif bits == 2:
        cv2.imwrite(path + ".png", out.astype("uint16"))

    return