Moyao001 commited on
Commit
f8e201b
·
verified ·
1 Parent(s): 6e316f5

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. CCEdit-main/src/controlnet11/README.md +620 -0
  2. CCEdit-main/src/controlnet11/annotator/util.py +98 -0
  3. CCEdit-main/src/controlnet11/annotator/zoe/LICENSE +21 -0
  4. CCEdit-main/src/controlnet11/annotator/zoe/__init__.py +49 -0
  5. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/__init__.py +24 -0
  6. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/__init__.py +24 -0
  7. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas.py +379 -0
  8. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/.gitignore +110 -0
  9. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/Dockerfile +29 -0
  10. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/environment.yaml +16 -0
  11. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/ios/.gitignore +2 -0
  12. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/ios/LICENSE +201 -0
  13. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/ios/Midas/AppDelegate.swift +41 -0
  14. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/ios/Midas/Assets.xcassets/AppIcon.appiconset/Contents.json +1 -0
  15. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/ios/Midas/Assets.xcassets/Contents.json +6 -0
  16. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/ios/Midas/Camera Feed/CameraFeedManager.swift +316 -0
  17. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/ios/Midas/Camera Feed/PreviewView.swift +39 -0
  18. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/ios/Midas/Cells/InfoCell.swift +21 -0
  19. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/ios/Midas/Constants.swift +25 -0
  20. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/ios/Midas/Extensions/CGSizeExtension.swift +45 -0
  21. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/ios/Midas/Extensions/CVPixelBufferExtension.swift +172 -0
  22. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/ios/Midas/Extensions/TFLiteExtension.swift +75 -0
  23. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/ios/Midas/Info.plist +42 -0
  24. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/ios/Midas/ModelDataHandler/ModelDataHandler.swift +464 -0
  25. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/ios/Midas/Storyboards/Base.lproj/Launch Screen.storyboard +48 -0
  26. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/ios/Midas/Storyboards/Base.lproj/Main.storyboard +236 -0
  27. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/ios/Midas/ViewControllers/ViewController.swift +489 -0
  28. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/ios/Midas/Views/OverlayView.swift +63 -0
  29. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/ios/README.md +105 -0
  30. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/ios/RunScripts/download_models.sh +14 -0
  31. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/utils.py +199 -0
  32. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/builder.py +51 -0
  33. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/depth_model.py +152 -0
  34. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/model_io.py +92 -0
  35. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/zoedepth/__init__.py +31 -0
  36. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/zoedepth/__pycache__/__init__.cpython-39.pyc +0 -0
  37. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/zoedepth/__pycache__/zoedepth_v1.cpython-39.pyc +0 -0
  38. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/zoedepth/config_zoedepth.json +58 -0
  39. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/zoedepth/config_zoedepth_kitti.json +22 -0
  40. CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/zoedepth/zoedepth_v1.py +250 -0
  41. CCEdit-main/src/controlnet11/gradio_annotator.py +376 -0
  42. CCEdit-main/src/controlnet11/gradio_inpaint.py +122 -0
  43. CCEdit-main/src/controlnet11/gradio_ip2p.py +99 -0
  44. CCEdit-main/src/controlnet11/gradio_lineart.py +112 -0
  45. CCEdit-main/src/controlnet11/gradio_mlsd.py +115 -0
  46. CCEdit-main/src/controlnet11/gradio_seg.py +120 -0
  47. CCEdit-main/src/controlnet11/gradio_shuffle.py +105 -0
  48. CCEdit-main/src/controlnet11/models/cldm_v15.yaml +79 -0
  49. CCEdit-main/src/controlnet11/models/cldm_v15_avg_pool.yaml +80 -0
  50. CCEdit-main/src/controlnet11/models/cldm_v21.yaml +85 -0
CCEdit-main/src/controlnet11/README.md ADDED
@@ -0,0 +1,620 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # ControlNet 1.1
2
+
3
+ This is the official release of ControlNet 1.1.
4
+
5
+ ControlNet 1.1 has exactly the same architecture as ControlNet 1.0.
6
+
7
+ We promise that we will not change the neural network architecture before ControlNet 1.5 (at least, and hopefully we will never change the network architecture). Perhaps this is the best news in ControlNet 1.1.
8
+
9
+ ControlNet 1.1 includes all previous models with improved robustness and result quality. Several new models are added.
10
+
11
+ Note that we are still working on [updating this to A1111](https://github.com/Mikubill/sd-webui-controlnet/issues/736).
12
+
13
+ This repo will be merged to [ControlNet](https://github.com/lllyasviel/ControlNet) after we make sure that everything is OK.
14
+
15
+ **Note that we are actively editing this page now. The information in this page will be more detailed and finalized when ControlNet 1.1 is ready.**
16
+
17
+ # This Github Repo is NOT an A1111 Extension
18
+
19
+ Please do not copy the URL of this repo into your A1111.
20
+
21
+ If you want to use ControlNet 1.1 in A1111, you only need to install https://github.com/Mikubill/sd-webui-controlnet , and only follow the instructions in that page.
22
+
23
+ This project is for research use and academic experiments. Again, do NOT install "ControlNet-v1-1-nightly" into your A1111.
24
+
25
+ # How to use ControlNet 1.1 in A1111?
26
+
27
+ The Beta Test for A1111 Is Started.
28
+
29
+ The A1111 plugin is: https://github.com/Mikubill/sd-webui-controlnet
30
+
31
+ Note that if you use A1111, you only need to follow the instructions in the above link. (You can ignore all installation steps in this page if you use A1111.)
32
+
33
+ **For researchers who are not familiar with A1111:** The A1111 plugin supports arbitrary combination of arbitrary number of ControlNets, arbitrary community models, arbitrary LoRAs, and arbitrary sampling methods! We should definitely try it!
34
+
35
+ Note that our official support for “Multi-ControlNet” is A1111-only. Please use [Automatic1111 with Multi-ControlNet](https://github.com/Mikubill/sd-webui-controlnet#Multi-ControlNet) if you want to use multiple ControlNets at the same time. The ControlNet project perfectly supports combining multiple ControlNets, and all production-ready ControlNets are extensively tested with multiple ControlNets combined.
36
+
37
+ # Model Specification
38
+
39
+ Starting from ControlNet 1.1, we begin to use the Standard ControlNet Naming Rules (SCNNRs) to name all models. We hope that this naming rule can improve the user experience.
40
+
41
+ ![img](github_docs/imgs/spec.png)
42
+
43
+ ControlNet 1.1 include 14 models (11 production-ready models and 3 experimental models):
44
+
45
+ control_v11p_sd15_canny
46
+ control_v11p_sd15_mlsd
47
+ control_v11f1p_sd15_depth
48
+ control_v11p_sd15_normalbae
49
+ control_v11p_sd15_seg
50
+ control_v11p_sd15_inpaint
51
+ control_v11p_sd15_lineart
52
+ control_v11p_sd15s2_lineart_anime
53
+ control_v11p_sd15_openpose
54
+ control_v11p_sd15_scribble
55
+ control_v11p_sd15_softedge
56
+ control_v11e_sd15_shuffle
57
+ control_v11e_sd15_ip2p
58
+ control_v11f1e_sd15_tile
59
+
60
+ You can download all those models from our [HuggingFace Model Page](https://huggingface.co/lllyasviel/ControlNet-v1-1/tree/main). All these models should be put in the folder "models".
61
+
62
+ You need to download Stable Diffusion 1.5 model ["v1-5-pruned.ckpt"](https://huggingface.co/runwayml/stable-diffusion-v1-5/tree/main) and put it in the folder "models".
63
+
64
+ Our python codes will automatically download other annotator models like HED and OpenPose. Nevertheless, if you want to manually download these, you can download all other annotator models from [here](https://huggingface.co/lllyasviel/Annotators/tree/main). All these models should be put in folder "annotator/ckpts".
65
+
66
+ To install:
67
+
68
+ conda env create -f environment.yaml
69
+ conda activate control-v11
70
+
71
+ Note that if you use 8GB GPU, you need to set "save_memory = True" in "config.py".
72
+
73
+ ## ControlNet 1.1 Depth
74
+
75
+ Control Stable Diffusion with Depth Maps.
76
+
77
+ Model file: control_v11f1p_sd15_depth.pth
78
+
79
+ Config file: control_v11f1p_sd15_depth.yaml
80
+
81
+ Training data: Midas depth (resolution 256/384/512) + Leres Depth (resolution 256/384/512) + Zoe Depth (resolution 256/384/512). Multiple depth map generators at multiple resolutions as data augmentation.
82
+
83
+ Acceptable Preprocessors: Depth_Midas, Depth_Leres, Depth_Zoe. This model is highly robust and can work on real depth maps from rendering engines.
84
+
85
+ python gradio_depth.py
86
+
87
+ Non-cherry-picked batch test with random seed 12345 ("a handsome man"):
88
+
89
+ ![img](github_docs/imgs/depth_1.png)
90
+
91
+ **Update**
92
+
93
+ 2023/04/14: 72 hours ago we uploaded a wrong model "control_v11p_sd15_depth" by mistake. That model is an intermediate checkpoint during the training. That model is not converged and may cause distortion in results. We uploaded the correct depth model as "control_v11f1p_sd15_depth". The "f1" means bug fix 1. The incorrect model is removed. Sorry for the inconvenience.
94
+
95
+ **Improvements in Depth 1.1:**
96
+
97
+ 1. The training dataset of previous cnet 1.0 has several problems including (1) a small group of greyscale human images are duplicated thousands of times (!!), causing the previous model somewhat likely to generate grayscale human images; (2) some images has low quality, very blurry, or significant JPEG artifacts; (3) a small group of images has wrong paired prompts caused by a mistake in our data processing scripts. The new model fixed all problems of the training dataset and should be more reasonable in many cases.
98
+ 2. The new depth model is a relatively unbiased model. It is not trained with some specific type of depth by some specific depth estimation method. It is not over-fitted to one preprocessor. This means this model will work better with different depth estimation, different preprocessor resolutions, or even with real depth created by 3D engines.
99
+ 3. Some reasonable data augmentations are applied to training, like random left-right flipping.
100
+ 4. The model is resumed from depth 1.0, and it should work well in all cases where depth 1.0 works well. If not, please open an issue with image, and we will take a look at your case. Depth 1.1 works well in many failure cases of depth 1.0.
101
+ 5. If you use Midas depth (the "depth" in webui plugin) with 384 preprocessor resolution, the difference between depth 1.0 and 1.1 should be minimal. However, if you try other preprocessor resolutions or other preprocessors (like leres and zoe), the depth 1.1 is expected to be a bit better than 1.0.
102
+
103
+ ## ControlNet 1.1 Normal
104
+
105
+ Control Stable Diffusion with Normal Maps.
106
+
107
+ Model file: control_v11p_sd15_normalbae.pth
108
+
109
+ Config file: control_v11p_sd15_normalbae.yaml
110
+
111
+ Training data: [Bae's](https://github.com/baegwangbin/surface_normal_uncertainty) normalmap estimation method.
112
+
113
+ Acceptable Preprocessors: Normal BAE. This model can accept normal maps from rendering engines as long as the normal map follows [ScanNet's](http://www.scan-net.org/) protocol. That is to say, the color of your normal map should look like [the second column of this image](https://raw.githubusercontent.com/baegwangbin/surface_normal_uncertainty/main/figs/readme_scannet.png).
114
+
115
+ Note that this method is much more reasonable than the normal-from-midas method in ControlNet 1.0. The previous method will be abandoned.
116
+
117
+ python gradio_normalbae.py
118
+
119
+ Non-cherry-picked batch test with random seed 12345 ("a man made of flowers"):
120
+
121
+ ![img](github_docs/imgs/normal_1.png)
122
+
123
+ Non-cherry-picked batch test with random seed 12345 ("room"):
124
+
125
+ ![img](github_docs/imgs/normal_2.png)
126
+
127
+ **Improvements in Normal 1.1:**
128
+
129
+ 1. The normal-from-midas method in Normal 1.0 is neither reasonable nor physically correct. That method does not work very well in many images. The normal 1.0 model cannot interpret real normal maps created by rendering engines.
130
+ 2. This Normal 1.1 is much more reasonable because the preprocessor is trained to estimate normal maps with a relatively correct protocol (NYU-V2's visualization method). This means the Normal 1.1 can interpret real normal maps from rendering engines as long as the colors are correct (blue is front, red is left, green is top).
131
+ 3. In our test, this model is robust and can achieve similar performance to the depth model. In previous CNET 1.0, the Normal 1.0 is not very frequently used. But this Normal 1.1 is much improved and has potential to be used much more frequently.
132
+
133
+ ## ControlNet 1.1 Canny
134
+
135
+ Control Stable Diffusion with Canny Maps.
136
+
137
+ Model file: control_v11p_sd15_canny.pth
138
+
139
+ Config file: control_v11p_sd15_canny.yaml
140
+
141
+ Training data: Canny with random thresholds.
142
+
143
+ Acceptable Preprocessors: Canny.
144
+
145
+ We fixed several problems in previous training datasets.
146
+
147
+ python gradio_canny.py
148
+
149
+ Non-cherry-picked batch test with random seed 12345 ("dog in a room"):
150
+
151
+ ![img](github_docs/imgs/canny_1.png)
152
+
153
+ **Improvements in Canny 1.1:**
154
+
155
+ 1. The training dataset of previous cnet 1.0 has several problems including (1) a small group of greyscale human images are duplicated thousands of times (!!), causing the previous model somewhat likely to generate grayscale human images; (2) some images has low quality, very blurry, or significant JPEG artifacts; (3) a small group of images has wrong paired prompts caused by a mistake in our data processing scripts. The new model fixed all problems of the training dataset and should be more reasonable in many cases.
156
+ 2. Because the Canny model is one of the most important (perhaps the most frequently used) ControlNet, we used a fund to train it on a machine with 8 Nvidia A100 80G with batchsize 8×32=256 for 3 days, spending 72×30=2160 USD (8 A100 80G with 30 USD/hour). The model is resumed from Canny 1.0.
157
+ 3. Some reasonable data augmentations are applied to training, like random left-right flipping.
158
+ 4. Although it is difficult to evaluate a ControlNet, we find Canny 1.1 is a bit more robust and a bit higher visual quality than Canny 1.0.
159
+
160
+ ## ControlNet 1.1 MLSD
161
+
162
+ Control Stable Diffusion with M-LSD straight lines.
163
+
164
+ Model file: control_v11p_sd15_mlsd.pth
165
+
166
+ Config file: control_v11p_sd15_mlsd.yaml
167
+
168
+ Training data: M-LSD Lines.
169
+
170
+ Acceptable Preprocessors: MLSD.
171
+
172
+ We fixed several problems in previous training datasets. The model is resumed from ControlNet 1.0 and trained with 200 GPU hours of A100 80G.
173
+
174
+ python gradio_mlsd.py
175
+
176
+ Non-cherry-picked batch test with random seed 12345 ("room"):
177
+
178
+ ![img](github_docs/imgs/mlsd_1.png)
179
+
180
+ **Improvements in MLSD 1.1:**
181
+
182
+ 1. The training dataset of previous cnet 1.0 has several problems including (1) a small group of greyscale human images are duplicated thousands of times (!!), causing the previous model somewhat likely to generate grayscale human images; (2) some images has low quality, very blurry, or significant JPEG artifacts; (3) a small group of images has wrong paired prompts caused by a mistake in our data processing scripts. The new model fixed all problems of the training dataset and should be more reasonable in many cases.
183
+ 2. We enlarged the training dataset by adding 300K more images by using MLSD to find images with more than 16 straight lines in it.
184
+ 3. Some reasonable data augmentations are applied to training, like random left-right flipping.
185
+ 4. Resumed from MLSD 1.0 with continued training with 200 GPU hours of A100 80G.
186
+
187
+ ## ControlNet 1.1 Scribble
188
+
189
+ Control Stable Diffusion with Scribbles.
190
+
191
+ Model file: control_v11p_sd15_scribble.pth
192
+
193
+ Config file: control_v11p_sd15_scribble.yaml
194
+
195
+ Training data: Synthesized scribbles.
196
+
197
+ Acceptable Preprocessors: Synthesized scribbles (Scribble_HED, Scribble_PIDI, etc.) or hand-drawn scribbles.
198
+
199
+ We fixed several problems in previous training datasets. The model is resumed from ControlNet 1.0 and trained with 200 GPU hours of A100 80G.
200
+
201
+ # To test synthesized scribbles
202
+ python gradio_scribble.py
203
+ # To test hand-drawn scribbles in an interactive demo
204
+ python gradio_interactive.py
205
+
206
+ Non-cherry-picked batch test with random seed 12345 ("man in library"):
207
+
208
+ ![img](github_docs/imgs/scribble_1.png)
209
+
210
+ Non-cherry-picked batch test with random seed 12345 (interactive, "the beautiful landscape"):
211
+
212
+ ![img](github_docs/imgs/scribble_2.png)
213
+
214
+ **Improvements in Scribble 1.1:**
215
+
216
+ 1. The training dataset of previous cnet 1.0 has several problems including (1) a small group of greyscale human images are duplicated thousands of times (!!), causing the previous model somewhat likely to generate grayscale human images; (2) some images has low quality, very blurry, or significant JPEG artifacts; (3) a small group of images has wrong paired prompts caused by a mistake in our data processing scripts. The new model fixed all problems of the training dataset and should be more reasonable in many cases.
217
+ 2. We find out that users sometimes like to draw very thick scribbles. Because of that, we used more aggressive random morphological transforms to synthesize scribbles. This model should work well even when the scribbles are relatively thick (the maximum width of training data is 24-pixel-width scribble in a 512 canvas, but it seems to work well even for a bit wider scribbles; the minimum width is 1 pixel).
218
+ 3. Resumed from Scribble 1.0, continued with 200 GPU hours of A100 80G.
219
+
220
+ ## ControlNet 1.1 Soft Edge
221
+
222
+ Control Stable Diffusion with Soft Edges.
223
+
224
+ Model file: control_v11p_sd15_softedge.pth
225
+
226
+ Config file: control_v11p_sd15_softedge.yaml
227
+
228
+ Training data: SoftEdge_PIDI, SoftEdge_PIDI_safe, SoftEdge_HED, SoftEdge_HED_safe.
229
+
230
+ Acceptable Preprocessors: SoftEdge_PIDI, SoftEdge_PIDI_safe, SoftEdge_HED, SoftEdge_HED_safe.
231
+
232
+ This model is significantly improved compared to previous model. All users should update as soon as possible.
233
+
234
+ New in ControlNet 1.1: now we added a new type of soft edge called "SoftEdge_safe". This is motivated by the fact that HED or PIDI tends to hide a corrupted greyscale version of the original image inside the soft estimation, and such hidden patterns can distract ControlNet, leading to bad results. The solution is to use a pre-processing to quantize the edge maps into several levels so that the hidden patterns can be completely removed. The implementation is [in the 78-th line of annotator/util.py](https://github.com/lllyasviel/ControlNet-v1-1-nightly/blob/4c9560ebe7679daac53a0599a11b9b7cd984ac55/annotator/util.py#L78).
235
+
236
+ The performance can be roughly noted as:
237
+
238
+ Robustness: SoftEdge_PIDI_safe > SoftEdge_HED_safe >> SoftEdge_PIDI > SoftEdge_HED
239
+
240
+ Maximum result quality: SoftEdge_HED > SoftEdge_PIDI > SoftEdge_HED_safe > SoftEdge_PIDI_safe
241
+
242
+ Considering the trade-off, we recommend to use SoftEdge_PIDI by default. In most cases it works very well.
243
+
244
+ python gradio_softedge.py
245
+
246
+ Non-cherry-picked batch test with random seed 12345 ("a handsome man"):
247
+
248
+ ![img](github_docs/imgs/softedge_1.png)
249
+
250
+ **Improvements in Soft Edge 1.1:**
251
+
252
+ 1. Soft Edge 1.1 was called HED 1.0 in previous ControlNet.
253
+ 2. The training dataset of previous cnet 1.0 has several problems including (1) a small group of greyscale human images are duplicated thousands of times (!!), causing the previous model somewhat likely to generate grayscale human images; (2) some images has low quality, very blurry, or significant JPEG artifacts; (3) a small group of images has wrong paired prompts caused by a mistake in our data processing scripts. The new model fixed all problems of the training dataset and should be more reasonable in many cases.
254
+ 3. The Soft Edge 1.1 is significantly (in nearly 100\% cases) better than HED 1.0. This is mainly because HED or PIDI estimator tend to hide a corrupted greyscale version of original image inside the soft edge map and the previous model HED 1.0 is over-fitted to restore that hidden corrupted image rather than perform boundary-aware diffusion. The training of Soft Edge 1.1 used 75\% "safe" filtering to remove such hidden corrupted greyscale images inside control maps. This makes the Soft Edge 1.1 very robust. In our test, Soft Edge 1.1 is as usable as the depth model and has potential to be more frequently used.
255
+
256
+ ## ControlNet 1.1 Segmentation
257
+
258
+ Control Stable Diffusion with Semantic Segmentation.
259
+
260
+ Model file: control_v11p_sd15_seg.pth
261
+
262
+ Config file: control_v11p_sd15_seg.yaml
263
+
264
+ Training data: COCO + ADE20K.
265
+
266
+ Acceptable Preprocessors: Seg_OFADE20K (Oneformer ADE20K), Seg_OFCOCO (Oneformer COCO), Seg_UFADE20K (Uniformer ADE20K), or manually created masks.
267
+
268
+ Now the model can receive both types of ADE20K or COCO annotations. We find that recognizing the segmentation protocol is trivial for the ControlNet encoder and training the model on multiple segmentation protocols leads to better performance.
269
+
270
+ python gradio_seg.py
271
+
272
+ Non-cherry-picked batch test with random seed 12345 (ADE20k protocol, "house"):
273
+
274
+ ![img](github_docs/imgs/seg_1.png)
275
+
276
+ Non-cherry-picked batch test with random seed 12345 (COCO protocol, "house"):
277
+
278
+ ![img](github_docs/imgs/seg_2.png)
279
+
280
+ **Improvements in Segmentation 1.1:**
281
+
282
+ 1. COCO protocol is supported. The previous Segmentation 1.0 supports about 150 colors, but Segmentation 1.1 supports another 182 colors from coco.
283
+ 2. Resumed from Segmentation 1.0. All previous inputs should still work.
284
+
285
+ ## ControlNet 1.1 Openpose
286
+
287
+ Control Stable Diffusion with Openpose.
288
+
289
+ Model file: control_v11p_sd15_openpose.pth
290
+
291
+ Config file: control_v11p_sd15_openpose.yaml
292
+
293
+ The model is trained and can accept the following combinations:
294
+
295
+ * Openpose body
296
+ * Openpose hand
297
+ * Openpose face
298
+ * Openpose body + Openpose hand
299
+ * Openpose body + Openpose face
300
+ * Openpose hand + Openpose face
301
+ * Openpose body + Openpose hand + Openpose face
302
+
303
+ However, providing all those combinations is too complicated. We recommend to provide the users with only two choices:
304
+
305
+ * "Openpose" = Openpose body
306
+ * "Openpose Full" = Openpose body + Openpose hand + Openpose face
307
+
308
+ You can try with the demo:
309
+
310
+ python gradio_openpose.py
311
+
312
+ Non-cherry-picked batch test with random seed 12345 ("man in suit"):
313
+
314
+ ![img](github_docs/imgs/openpose_1.png)
315
+
316
+ Non-cherry-picked batch test with random seed 12345 (multiple people in the wild, "handsome boys in the party"):
317
+
318
+ ![img](github_docs/imgs/openpose_2.png)
319
+
320
+ **Improvements in Openpose 1.1:**
321
+
322
+ 1. The improvement of this model is mainly based on our improved implementation of OpenPose. We carefully reviewed the difference between the pytorch OpenPose and CMU's c++ openpose. Now the processor should be more accurate, especially for hands. The improvement of processor leads to the improvement of Openpose 1.1.
323
+ 2. More inputs are supported (hand and face).
324
+ 3. The training dataset of previous cnet 1.0 has several problems including (1) a small group of greyscale human images are duplicated thousands of times (!!), causing the previous model somewhat likely to generate grayscale human images; (2) some images has low quality, very blurry, or significant JPEG artifacts; (3) a small group of images has wrong paired prompts caused by a mistake in our data processing scripts. The new model fixed all problems of the training dataset and should be more reasonable in many cases.
325
+
326
+ ## ControlNet 1.1 Lineart
327
+
328
+ Control Stable Diffusion with Linearts.
329
+
330
+ Model file: control_v11p_sd15_lineart.pth
331
+
332
+ Config file: control_v11p_sd15_lineart.yaml
333
+
334
+ This model is trained on awacke1/Image-to-Line-Drawings. The preprocessor can generate detailed or coarse linearts from images (Lineart and Lineart_Coarse). The model is trained with sufficient data augmentation and can receive manually drawn linearts.
335
+
336
+ python gradio_lineart.py
337
+
338
+ Non-cherry-picked batch test with random seed 12345 (detailed lineart extractor, "bag"):
339
+
340
+ ![img](github_docs/imgs/lineart_1.png)
341
+
342
+ Non-cherry-picked batch test with random seed 12345 (coarse lineart extractor, "Michael Jackson's concert"):
343
+
344
+ ![img](github_docs/imgs/lineart_2.png)
345
+
346
+ Non-cherry-picked batch test with random seed 12345 (use manually drawn linearts, "wolf"):
347
+
348
+ ![img](github_docs/imgs/lineart_3.png)
349
+
350
+
351
+ ## ControlNet 1.1 Anime Lineart
352
+
353
+ Control Stable Diffusion with Anime Linearts.
354
+
355
+ Model file: control_v11p_sd15s2_lineart_anime.pth
356
+
357
+ Config file: control_v11p_sd15s2_lineart_anime.yaml
358
+
359
+ Training data and implementation details: (description removed).
360
+
361
+ This model can take real anime line drawings or extracted line drawings as inputs.
362
+
363
+ Some important notice:
364
+
365
+ 1. You need a file "anything-v3-full.safetensors" to run the demo. We will not provide the file. Please find that file on the Internet on your own.
366
+ 2. This model is trained with 3x token length and clip skip 2.
367
+ 3. This is a long prompt model. Unless you use LoRAs, results are better with long prompts.
368
+ 4. This model does not support Guess Mode.
369
+
370
+ Demo:
371
+
372
+ python gradio_lineart_anime.py
373
+
374
+
375
+ Non-cherry-picked batch test with random seed 12345 ("1girl, in classroom, skirt, uniform, red hair, bag, green eyes"):
376
+
377
+ ![img](github_docs/imgs/anime_3.png)
378
+
379
+ Non-cherry-picked batch test with random seed 12345 ("1girl, saber, at night, sword, green eyes, golden hair, stocking"):
380
+
381
+ ![img](github_docs/imgs/anime_4.png)
382
+
383
+ Non-cherry-picked batch test with random seed 12345 (extracted line drawing, "1girl, Castle, silver hair, dress, Gemstone, cinematic lighting, mechanical hand, 4k, 8k, extremely detailed, Gothic, green eye"):
384
+
385
+ ![img](github_docs/imgs/anime_6.png)
386
+
387
+ ## ControlNet 1.1 Shuffle
388
+
389
+ Control Stable Diffusion with Content Shuffle.
390
+
391
+ Model file: control_v11e_sd15_shuffle.pth
392
+
393
+ Config file: control_v11e_sd15_shuffle.yaml
394
+
395
+ Demo:
396
+
397
+ python gradio_shuffle.py
398
+
399
+ The model is trained to reorganize images. [We use a random flow to shuffle the image and control Stable Diffusion to recompose the image.](github_docs/annotator.md#content-reshuffle)
400
+
401
+ Non-cherry-picked batch test with random seed 12345 ("hong kong"):
402
+
403
+ ![img](github_docs/imgs/shuffle_1.png)
404
+
405
+ In the 6 images on the right, the left-top one is the "shuffled" image. All others are outputs.
406
+
407
+ In fact, since the ControlNet is trained to recompose images, we do not even need to shuffle the input - sometimes we can just use the original image as input.
408
+
409
+ In this way, this ControlNet can be guided by prompts or other ControlNets to change the image style.
410
+
411
+ Note that this method has nothing to do with CLIP vision or some other models.
412
+
413
+ This is a pure ControlNet.
414
+
415
+ Non-cherry-picked batch test with random seed 12345 ("iron man"):
416
+
417
+ ![img](github_docs/imgs/shuffle_2.png)
418
+
419
+ Non-cherry-picked batch test with random seed 12345 ("spider man"):
420
+
421
+ ![img](github_docs/imgs/shuffle_3.png)
422
+
423
+ **Multi-ControlNets** (A1111-only)
424
+
425
+ Source Image (not used):
426
+
427
+ <img src="https://github.com/lllyasviel/ControlNet-v1-1-nightly/assets/19834515/56050654-6a82-495c-8bdc-d63847053e54" width="200">
428
+
429
+ Canny Image (Input):
430
+
431
+ <img src="https://github.com/lllyasviel/ControlNet-v1-1-nightly/assets/19834515/5dcb3d28-b845-4752-948d-6357224ca2ef" width="200">
432
+
433
+ Shuffle Image (Input):
434
+
435
+ <img src="https://github.com/lllyasviel/ControlNet-v1-1-nightly/assets/19834515/c0d98c17-d79b-49d8-96af-89b87c532820" width="200">
436
+
437
+ Outputs:
438
+
439
+ ![image](https://github.com/lllyasviel/ControlNet-v1-1-nightly/assets/19834515/a4b30709-8393-43d1-9da2-5c6c5ea70e9c)
440
+
441
+ (From: https://github.com/Mikubill/sd-webui-controlnet/issues/736#issuecomment-1509986321)
442
+
443
+ **Important If You Implement Your Own Inference:**
444
+
445
+ Note that this ControlNet requires to add a global average pooling " x = torch.mean(x, dim=(2, 3), keepdim=True) " between the ControlNet Encoder outputs and SD Unet layers. And the ControlNet must be put only on the conditional side of cfg scale. We recommend to use the "global_average_pooling" item in the yaml file to control such behaviors.
446
+
447
+ ~Note that this ControlNet Shuffle will be the one and only one image stylization method that we will maintain for the robustness in a long term support. We have tested other CLIP image encoder, Unclip, image tokenization, and image-based prompts but it seems that those methods do not work very well with user prompts or additional/multiple U-Net injections. See also the evidence [here](https://github.com/lllyasviel/ControlNet/issues/255), [here](https://github.com/Mikubill/sd-webui-controlnet/issues/547), and some other related issues.~ After some more recent researches/experiments, we plan to support more types of stylization methods in the future.
448
+
449
+ ## ControlNet 1.1 Instruct Pix2Pix
450
+
451
+ Control Stable Diffusion with Instruct Pix2Pix.
452
+
453
+ Model file: control_v11e_sd15_ip2p.pth
454
+
455
+ Config file: control_v11e_sd15_ip2p.yaml
456
+
457
+ Demo:
458
+
459
+ python gradio_ip2p.py
460
+
461
+ This is a controlnet trained on the [Instruct Pix2Pix dataset](https://github.com/timothybrooks/instruct-pix2pix).
462
+
463
+ Different from official Instruct Pix2Pix, this model is trained with 50\% instruction prompts and 50\% description prompts. For example, "a cute boy" is a description prompt, while "make the boy cute" is an instruction prompt.
464
+
465
+ Because this is a ControlNet, you do not need to trouble with original IP2P's double cfg tuning. And, this model can be applied to any base model.
466
+
467
+ Also, it seems that instructions like "make it into X" work better than "make Y into X".
468
+
469
+ Non-cherry-picked batch test with random seed 12345 ("make it on fire"):
470
+
471
+ ![img](github_docs/imgs/ip2p_1.png)
472
+
473
+ Non-cherry-picked batch test with random seed 12345 ("make it winter"):
474
+
475
+ ![img](github_docs/imgs/ip2p_2.png)
476
+
477
+ We mark this model as "experimental" because it sometimes needs cherry-picking. For example, here is non-cherry-picked batch test with random seed 12345 ("make he iron man"):
478
+
479
+ ![img](github_docs/imgs/ip2p_3.png)
480
+
481
+
482
+ ## ControlNet 1.1 Inpaint
483
+
484
+ Control Stable Diffusion with Inpaint.
485
+
486
+ Model file: control_v11p_sd15_inpaint.pth
487
+
488
+ Config file: control_v11p_sd15_inpaint.yaml
489
+
490
+ Demo:
491
+
492
+ python gradio_inpaint.py
493
+
494
+ Some notices:
495
+
496
+ 1. This inpainting ControlNet is trained with 50\% random masks and 50\% random optical flow occlusion masks. This means the model can not only support the inpainting application but also work on video optical flow warping. Perhaps we will provide some examples in the future (depending on our workloads).
497
+ 2. We updated the gradio (2023/5/11) so that the standalone gradio codes in main ControlNet repo also do not change unmasked areas. Automatic 1111 users are not influenced.
498
+
499
+ Non-cherry-picked batch test with random seed 12345 ("a handsome man"):
500
+
501
+ ![img](github_docs/imgs/inpaint_after_fix.png)
502
+
503
+ See also the Guidelines for [Using ControlNet Inpaint in Automatic 1111](https://github.com/Mikubill/sd-webui-controlnet/discussions/1143).
504
+
505
+ ## ControlNet 1.1 Tile
506
+
507
+ Update 2023 April 25: The previously unfinished tile model is finished now. The new name is "control_v11f1e_sd15_tile". The "f1e" means 1st bug fix ("f1"), experimental ("e"). The previous "control_v11u_sd15_tile" is removed. Please update if your model name is "v11u".
508
+
509
+ Control Stable Diffusion with Tiles.
510
+
511
+ Model file: control_v11f1e_sd15_tile.pth
512
+
513
+ Config file: control_v11f1e_sd15_tile.yaml
514
+
515
+ Demo:
516
+
517
+ python gradio_tile.py
518
+
519
+ The model can be used in many ways. Overall, the model has two behaviors:
520
+
521
+ * Ignore the details in an image and generate new details.
522
+ * Ignore global prompts if local tile semantics and prompts mismatch, and guide diffusion with local context.
523
+
524
+ Because the model can generate new details and ignore existing image details, we can use this model to remove bad details and add refined details. For example, remove blurring caused by image resizing.
525
+
526
+ Below is an example of 8x super resolution. This is a 64x64 dog image.
527
+
528
+ ![p](test_imgs/dog64.png)
529
+
530
+ Non-cherry-picked batch test with random seed 12345 ("dog on grassland"):
531
+
532
+ ![img](github_docs/imgs/tile_new_1.png)
533
+
534
+ Note that this model is not a super resolution model. It ignores the details in an image and generates new details. This means you can use it to fix bad details in an image.
535
+
536
+ For example, below is a dog image corrupted by Real-ESRGAN. This is a typical example that sometimes super resolution methods fail to upscale images when source context is too small.
537
+
538
+ ![p](test_imgs/dog_bad_sr.png)
539
+
540
+ Non-cherry-picked batch test with random seed 12345 ("dog on grassland"):
541
+
542
+ ![img](github_docs/imgs/tile_new_2.png)
543
+
544
+ If your image already has good details, you can still use this model to replace image details. Note that Stable Diffusion's I2I can achieve similar effects but this model makes it much easier for you to maintain the overall structure and only change details even with denoising strength 1.0 .
545
+
546
+ Non-cherry-picked batch test with random seed 12345 ("Silver Armor"):
547
+
548
+ ![img](github_docs/imgs/tile_new_3.png)
549
+
550
+ More and more people begin to think about different methods to diffuse at tiles so that images can be very big (at 4k or 8k).
551
+
552
+ The problem is that, in Stable Diffusion, your prompts will always influence each tile.
553
+
554
+ For example, if your prompts are "a beautiful girl" and you split an image into 4×4=16 blocks and do diffusion in each block, then you will get 16 "beautiful girls" rather than "a beautiful girl". This is a well-known problem.
555
+
556
+ Right now people's solution is to use some meaningless prompts like "clear, clear, super clear" to diffuse blocks. But you can expect that the results will be bad if the denoising strength is high. And because the prompts are bad, the contents are pretty random.
557
+
558
+ ControlNet Tile can solve this problem. For a given tile, it recognizes what is inside the tile and increases the influence of that recognized semantics, and it also decreases the influence of global prompts if contents do not match.
559
+
560
+ Non-cherry-picked batch test with random seed 12345 ("a handsome man"):
561
+
562
+ ![img](github_docs/imgs/tile_new_4.png)
563
+
564
+ You can see that the prompt is "a handsome man" but the model does not paint "a handsome man" on those tree leaves. Instead, it recognizes the tree leaves and paints accordingly.
565
+
566
+ In this way, ControlNet is able to change the behavior of any Stable Diffusion model to perform diffusion in tiles.
567
+
568
+ **Gallery of ControlNet Tile**
569
+
570
+ *Note:* Our official support for tiled image upscaling is A1111-only. The gradio example in this repo does not include tiled upscaling scripts. Please use the A1111 extension to perform tiled upscaling (with other tiling scripts like Ultimate SD Upscale or Tiled Diffusion/VAE).
571
+
572
+ From https://github.com/Mikubill/sd-webui-controlnet/discussions/1142#discussioncomment-5788601
573
+
574
+ (Output, **Click image to see full resolution**)
575
+
576
+ ![grannie-comp](https://user-images.githubusercontent.com/54312595/235352555-846982dc-eba2-4e6a-8dfa-076a5e9ee4fd.jpg)
577
+
578
+ (Zooming-in of outputs)
579
+
580
+ ![grannie-Comp_face](https://user-images.githubusercontent.com/54312595/235352557-8f90e59d-8d03-4909-b805-8643940973d0.jpg)
581
+
582
+ ![grannie-Comp_torso](https://user-images.githubusercontent.com/54312595/235352562-ad0a5618-a1dd-40d0-9bfe-65e9786b496f.jpg)
583
+
584
+ ![grannie-Comp_torso2](https://user-images.githubusercontent.com/54312595/235352567-4e9a887f-142f-4f65-8084-d4c7f602985b.jpg)
585
+
586
+ From https://github.com/Mikubill/sd-webui-controlnet/discussions/1142#discussioncomment-5788617
587
+
588
+ (Input)
589
+
590
+ ![image](https://user-images.githubusercontent.com/34932866/235639514-31df5838-e251-4a17-b6ad-a678cdb8a58d.png)
591
+
592
+ (Output, **Click image to see full resolution**)
593
+ ![image](https://user-images.githubusercontent.com/34932866/235639422-1f95d228-f902-4d94-b57b-e67460a719ef.png)
594
+
595
+ From: https://github.com/lllyasviel/ControlNet-v1-1-nightly/issues/50#issuecomment-1541914890
596
+
597
+ (Input)
598
+
599
+ ![image](https://github.com/lllyasviel/ControlNet-v1-1-nightly/assets/19834515/9132700e-b2f9-4a33-a589-611ba234d325)
600
+
601
+ (Output, **Click image to see full resolution**, note that this example is extremely challenging)
602
+
603
+ ![image](https://github.com/lllyasviel/ControlNet-v1-1-nightly/assets/19834515/609acf87-1e51-4c03-85dc-37e486566158)
604
+
605
+ From https://github.com/Mikubill/sd-webui-controlnet/discussions/1142#discussioncomment-5796326:
606
+
607
+ (before)
608
+
609
+ ![2600914554720735184649534855329348215514636378-166329422](https://user-images.githubusercontent.com/31148570/236037445-f91a060b-698a-4cae-bf18-93796351da66.png)
610
+
611
+ (after, **Click image to see full resolution**)
612
+ ![2600914554720735184649534855329348215514636383-1549088886](https://user-images.githubusercontent.com/31148570/236037509-ce24c816-f50f-4fe0-8c19-423bf30dad26.png)
613
+
614
+ **Comparison to Midjourney V5/V5.1 coming soon.**
615
+
616
+ # Annotate Your Own Data
617
+
618
+ We provide simple python scripts to process images.
619
+
620
+ [See a gradio example here](github_docs/annotator.md).
CCEdit-main/src/controlnet11/annotator/util.py ADDED
@@ -0,0 +1,98 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import random
2
+
3
+ import numpy as np
4
+ import cv2
5
+ import os
6
+
7
+
8
+ annotator_ckpts_path = os.path.join(os.path.dirname(__file__), 'ckpts')
9
+
10
+
11
def HWC3(x):
    """Convert a uint8 image to 3-channel HWC format.

    Grayscale inputs are replicated across three channels; RGBA inputs
    are alpha-composited over a white background. RGB inputs pass
    through unchanged.
    """
    assert x.dtype == np.uint8
    if x.ndim == 2:
        x = x[:, :, None]
    assert x.ndim == 3
    channels = x.shape[2]
    assert channels in (1, 3, 4)
    if channels == 3:
        return x
    if channels == 1:
        return np.repeat(x, 3, axis=2)
    # channels == 4: blend RGB onto white using the alpha channel.
    rgb = x[:, :, 0:3].astype(np.float32)
    alpha = x[:, :, 3:4].astype(np.float32) / 255.0
    blended = rgb * alpha + 255.0 * (1.0 - alpha)
    return blended.clip(0, 255).astype(np.uint8)
28
+
29
+
30
def resize_image(input_image, resolution):
    """Resize so the shorter side is about `resolution`, snapped to multiples of 64.

    Uses Lanczos interpolation when upscaling and area interpolation when
    downscaling, matching the ControlNet preprocessing convention.
    """
    H, W, _ = input_image.shape
    scale = float(resolution) / float(min(H, W))
    target_h = int(np.round(H * scale / 64.0)) * 64
    target_w = int(np.round(W * scale / 64.0)) * 64
    interp = cv2.INTER_LANCZOS4 if scale > 1 else cv2.INTER_AREA
    return cv2.resize(input_image, (target_w, target_h), interpolation=interp)
41
+
42
+
43
def nms(x, t, s):
    """Thin an edge map via directional non-maximum suppression.

    The map is blurred with Gaussian sigma `s`, values are kept only where
    they equal the local maximum along one of four 3x3 line orientations,
    and the result is thresholded at `t` into a binary {0, 255} image.
    """
    blurred = cv2.GaussianBlur(x.astype(np.float32), (0, 0), s)

    line_kernels = [
        np.array([[0, 0, 0], [1, 1, 1], [0, 0, 0]], dtype=np.uint8),  # horizontal
        np.array([[0, 1, 0], [0, 1, 0], [0, 1, 0]], dtype=np.uint8),  # vertical
        np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]], dtype=np.uint8),  # diagonal \
        np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0]], dtype=np.uint8),  # diagonal /
    ]

    peaks = np.zeros_like(blurred)
    for kernel in line_kernels:
        # A pixel survives if it equals the dilation (i.e. is the line-wise max).
        np.putmask(peaks, cv2.dilate(blurred, kernel=kernel) == blurred, blurred)

    binary = np.zeros_like(peaks, dtype=np.uint8)
    binary[peaks > t] = 255
    return binary
59
+
60
+
61
def make_noise_disk(H, W, C, F):
    """Generate smooth random noise of shape (H, W, C), scaled to [0, 1].

    Coarse uniform noise is upsampled with cubic interpolation so the
    result varies smoothly with a feature scale of roughly F pixels.
    """
    coarse = np.random.uniform(low=0, high=1, size=((H // F) + 2, (W // F) + 2, C))
    smooth = cv2.resize(coarse, (W + 2 * F, H + 2 * F), interpolation=cv2.INTER_CUBIC)
    smooth = smooth[F: F + H, F: F + W]
    smooth -= np.min(smooth)
    smooth /= np.max(smooth)
    if C == 1:
        # cv2.resize drops a singleton channel axis; restore it.
        smooth = smooth[:, :, None]
    return smooth
70
+
71
+
72
def min_max_norm(x):
    """Rescale `x` linearly so its values span [0, 1].

    Returns a new array and leaves the input unmodified (the previous
    version used in-place ops, mutating the caller's array and raising
    on integer dtypes because of in-place true division). The
    denominator is clamped at 1e-5 so a constant input maps to zeros
    instead of dividing by zero.
    """
    x = x - np.min(x)
    return x / np.maximum(np.max(x), 1e-5)
76
+
77
+
78
def safe_step(x, step=2):
    """Quantize `x` onto coarse levels spaced 1/step apart.

    Values are scaled by (step + 1), truncated to integers, then divided
    by `step`, producing a stepped version of the input.
    """
    scaled = x.astype(np.float32) * float(step + 1)
    levels = scaled.astype(np.int32)
    return levels.astype(np.float32) / float(step)
82
+
83
+
84
def img2mask(img, H, W, low=10, high=90):
    """Turn an image into a random binary mask of shape (H, W).

    A random channel is selected (for color input), resized to (H, W),
    randomly inverted with probability 0.5, and thresholded at a random
    percentile drawn from [low, high).
    """
    assert img.ndim == 3 or img.ndim == 2
    assert img.dtype == np.uint8

    channel = img if img.ndim == 2 else img[:, :, random.randrange(0, img.shape[2])]
    channel = cv2.resize(channel, (W, H), interpolation=cv2.INTER_CUBIC)

    if random.uniform(0, 1) < 0.5:
        channel = 255 - channel

    return channel < np.percentile(channel, random.randrange(low, high))
CCEdit-main/src/controlnet11/annotator/zoe/LICENSE ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ MIT License
2
+
3
+ Copyright (c) 2022 Intelligent Systems Lab Org
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
CCEdit-main/src/controlnet11/annotator/zoe/__init__.py ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # ZoeDepth
2
+ # https://github.com/isl-org/ZoeDepth
3
+
4
+ import os
5
+ import cv2
6
+ import numpy as np
7
+ import torch
8
+
9
+ from einops import rearrange
10
+ from .zoedepth.models.zoedepth.zoedepth_v1 import ZoeDepth
11
+ from .zoedepth.utils.config import get_config
12
+ from annotator.util import annotator_ckpts_path
13
+
14
+
15
class ZoeDetector:
    """Monocular depth annotator backed by the ZoeDepth ZoeD_M12_N model.

    Downloads the checkpoint into the shared annotator cache on first use,
    then produces an inverted, percentile-normalized uint8 depth map.
    NOTE(review): the model is moved to .cuda() unconditionally, so a CUDA
    device is required.
    """

    def __init__(self):
        remote_model_path = "https://huggingface.co/lllyasviel/Annotators/resolve/main/ZoeD_M12_N.pt"
        modelpath = os.path.join(annotator_ckpts_path, "ZoeD_M12_N.pt")
        if not os.path.exists(modelpath):
            # Lazy import: basicsr is only needed for the one-time download.
            from basicsr.utils.download_util import load_file_from_url
            load_file_from_url(remote_model_path, model_dir=annotator_ckpts_path)
        conf = get_config("zoedepth", "infer")
        model = ZoeDepth.build_from_config(conf)
        # The checkpoint stores the state dict under the 'model' key.
        model.load_state_dict(torch.load(modelpath)['model'])
        model = model.cuda()
        model.device = 'cuda'
        model.eval()
        self.model = model

    def __call__(self, input_image):
        """Return a uint8 depth image (H, W) for an HWC uint8 image."""
        assert input_image.ndim == 3
        image_depth = input_image
        with torch.no_grad():
            # Scale to [0, 1] and reorder to the NCHW layout the model expects.
            image_depth = torch.from_numpy(image_depth).float().cuda()
            image_depth = image_depth / 255.0
            image_depth = rearrange(image_depth, 'h w c -> 1 c h w')
            depth = self.model.infer(image_depth)

            depth = depth[0, 0].cpu().numpy()

            # Robust contrast stretch: clip to the 2nd/85th depth percentiles.
            vmin = np.percentile(depth, 2)
            vmax = np.percentile(depth, 85)

            depth -= vmin
            depth /= vmax - vmin
            # Invert so the output brightness decreases with distance.
            depth = 1.0 - depth
            depth_image = (depth * 255.0).clip(0, 255).astype(np.uint8)

            return depth_image
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/__init__.py ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # MIT License
2
+
3
+ # Copyright (c) 2022 Intelligent Systems Lab Org
4
+
5
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ # of this software and associated documentation files (the "Software"), to deal
7
+ # in the Software without restriction, including without limitation the rights
8
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ # copies of the Software, and to permit persons to whom the Software is
10
+ # furnished to do so, subject to the following conditions:
11
+
12
+ # The above copyright notice and this permission notice shall be included in all
13
+ # copies or substantial portions of the Software.
14
+
15
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ # SOFTWARE.
22
+
23
+ # File author: Shariq Farooq Bhat
24
+
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/__init__.py ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # MIT License
2
+
3
+ # Copyright (c) 2022 Intelligent Systems Lab Org
4
+
5
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ # of this software and associated documentation files (the "Software"), to deal
7
+ # in the Software without restriction, including without limitation the rights
8
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ # copies of the Software, and to permit persons to whom the Software is
10
+ # furnished to do so, subject to the following conditions:
11
+
12
+ # The above copyright notice and this permission notice shall be included in all
13
+ # copies or substantial portions of the Software.
14
+
15
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ # SOFTWARE.
22
+
23
+ # File author: Shariq Farooq Bhat
24
+
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas.py ADDED
@@ -0,0 +1,379 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # MIT License
2
+ import os
3
+
4
+ # Copyright (c) 2022 Intelligent Systems Lab Org
5
+
6
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
7
+ # of this software and associated documentation files (the "Software"), to deal
8
+ # in the Software without restriction, including without limitation the rights
9
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10
+ # copies of the Software, and to permit persons to whom the Software is
11
+ # furnished to do so, subject to the following conditions:
12
+
13
+ # The above copyright notice and this permission notice shall be included in all
14
+ # copies or substantial portions of the Software.
15
+
16
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18
+ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19
+ # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20
+ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21
+ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22
+ # SOFTWARE.
23
+
24
+ # File author: Shariq Farooq Bhat
25
+
26
+ import torch
27
+ import torch.nn as nn
28
+ import numpy as np
29
+ from torchvision.transforms import Normalize
30
+
31
+
32
def denormalize(x):
    """Reverses the imagenet normalization applied to the input.

    Args:
        x (torch.Tensor - shape(N,3,H,W)): input tensor

    Returns:
        torch.Tensor - shape(N,3,H,W): Denormalized input
    """
    imagenet_mean = torch.tensor([0.485, 0.456, 0.406], device=x.device).view(1, 3, 1, 1)
    imagenet_std = torch.tensor([0.229, 0.224, 0.225], device=x.device).view(1, 3, 1, 1)
    return x * imagenet_std + imagenet_mean
44
+
45
def get_activation(name, bank):
    """Build a forward hook that records a module's output under bank[name]."""
    def _capture(module, inputs, output):
        bank[name] = output
    return _capture
49
+
50
+
51
class Resize(object):
    """Resize sample to given size (width, height)."""

    def __init__(
        self,
        width,
        height,
        resize_target=True,
        keep_aspect_ratio=False,
        ensure_multiple_of=1,
        resize_method="lower_bound",
    ):
        """Init.
        Args:
            width (int): desired output width
            height (int): desired output height
            resize_target (bool, optional): logged only in this implementation;
                kept for interface compatibility. Defaults to True.
            keep_aspect_ratio (bool, optional): preserve the input aspect
                ratio; the exact output size then depends on 'resize_method'.
                Defaults to False.
            ensure_multiple_of (int, optional): constrain output width and
                height to multiples of this value. Defaults to 1.
            resize_method (str, optional): one of
                "lower_bound": output at least as large as the given size;
                "upper_bound": output at most as large as the given size;
                "minimal": scale as little as possible.
                Defaults to "lower_bound".
        """
        print("Params passed to Resize transform:")
        print("\twidth: ", width)
        print("\theight: ", height)
        print("\tresize_target: ", resize_target)
        print("\tkeep_aspect_ratio: ", keep_aspect_ratio)
        print("\tensure_multiple_of: ", ensure_multiple_of)
        print("\tresize_method: ", resize_method)

        self._target_width = width
        self._target_height = height
        self._keep_aspect_ratio = keep_aspect_ratio
        self._multiple_of = ensure_multiple_of
        self._resize_method = resize_method

    def constrain_to_multiple_of(self, x, min_val=0, max_val=None):
        """Snap x to the nearest multiple of the configured step, honoring bounds."""
        step = self._multiple_of
        snapped = (np.round(x / step) * step).astype(int)
        if max_val is not None and snapped > max_val:
            # Rounding overshot the upper bound: round down instead.
            snapped = (np.floor(x / step) * step).astype(int)
        if snapped < min_val:
            # Still below the lower bound: round up instead.
            snapped = (np.ceil(x / step) * step).astype(int)
        return snapped

    def get_size(self, width, height):
        """Compute the (new_width, new_height) the input should be resized to."""
        scale_height = self._target_height / height
        scale_width = self._target_width / width

        if self._keep_aspect_ratio:
            if self._resize_method == "lower_bound":
                # Larger scale: both sides reach at least the target size.
                scale_width = scale_height = max(scale_width, scale_height)
            elif self._resize_method == "upper_bound":
                # Smaller scale: neither side exceeds the target size.
                scale_width = scale_height = min(scale_width, scale_height)
            elif self._resize_method == "minimal":
                # Whichever scale changes the image least.
                if abs(1 - scale_width) < abs(1 - scale_height):
                    scale_height = scale_width
                else:
                    scale_width = scale_height
            else:
                raise ValueError(
                    f"resize_method {self._resize_method} not implemented"
                )

        if self._resize_method == "lower_bound":
            new_height = self.constrain_to_multiple_of(
                scale_height * height, min_val=self._target_height
            )
            new_width = self.constrain_to_multiple_of(
                scale_width * width, min_val=self._target_width
            )
        elif self._resize_method == "upper_bound":
            new_height = self.constrain_to_multiple_of(
                scale_height * height, max_val=self._target_height
            )
            new_width = self.constrain_to_multiple_of(
                scale_width * width, max_val=self._target_width
            )
        elif self._resize_method == "minimal":
            new_height = self.constrain_to_multiple_of(scale_height * height)
            new_width = self.constrain_to_multiple_of(scale_width * width)
        else:
            raise ValueError(
                f"resize_method {self._resize_method} not implemented")

        return (new_width, new_height)

    def __call__(self, x):
        # x.shape[-2:] is (H, W); reversed gives (W, H) = get_size's argument order.
        width, height = self.get_size(*x.shape[-2:][::-1])
        return nn.functional.interpolate(x, (height, width), mode='bilinear', align_corners=True)
175
+
176
class PrepForMidas(object):
    """Input pipeline for MiDaS: optional resize followed by normalization."""

    def __init__(self, resize_mode="minimal", keep_aspect_ratio=True, img_size=384, do_resize=True):
        if isinstance(img_size, int):
            img_size = (img_size, img_size)
        net_h, net_w = img_size
        # Maps [0, 1] inputs to [-1, 1], the range MiDaS is trained on.
        self.normalization = Normalize(
            mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
        if do_resize:
            self.resizer = Resize(net_w, net_h, keep_aspect_ratio=keep_aspect_ratio,
                                  ensure_multiple_of=32, resize_method=resize_mode)
        else:
            self.resizer = nn.Identity()

    def __call__(self, x):
        return self.normalization(self.resizer(x))
188
+
189
+
190
class MidasCore(nn.Module):
    # Wraps a MiDaS network so it can serve as a relative-depth backbone:
    # forward hooks capture intermediate decoder activations and expose them
    # as multi-scale features alongside the depth prediction.
    def __init__(self, midas, trainable=False, fetch_features=True, layer_names=('out_conv', 'l4_rn', 'r4', 'r3', 'r2', 'r1'), freeze_bn=False, keep_aspect_ratio=True,
                 img_size=384, **kwargs):
        """Midas Base model used for multi-scale feature extraction.

        Args:
            midas (torch.nn.Module): Midas model.
            trainable (bool, optional): Train midas model. Defaults to False.
            fetch_features (bool, optional): Extract multi-scale features. Defaults to True.
            layer_names (tuple, optional): Layers used for feature extraction. Order = (head output features, last layer features, ...decoder features). Defaults to ('out_conv', 'l4_rn', 'r4', 'r3', 'r2', 'r1').
            freeze_bn (bool, optional): Freeze BatchNorm. Generally results in better finetuning performance. Defaults to False.
            keep_aspect_ratio (bool, optional): Keep the aspect ratio of input images while resizing. Defaults to True.
            img_size (int, tuple, optional): Input resolution. Defaults to 384.
        """
        super().__init__()
        self.core = midas
        self.output_channels = None  # filled in later via set_output_channels()
        self.core_out = {}  # activations captured by forward hooks, keyed by layer name
        self.trainable = trainable
        self.fetch_features = fetch_features
        # midas.scratch.output_conv = nn.Identity()
        self.handles = []  # hook handles, kept so the hooks can be detached again
        # self.layer_names = ['out_conv','l4_rn', 'r4', 'r3', 'r2', 'r1']
        self.layer_names = layer_names

        self.set_trainable(trainable)
        self.set_fetch_features(fetch_features)

        self.prep = PrepForMidas(keep_aspect_ratio=keep_aspect_ratio,
                                 img_size=img_size, do_resize=kwargs.get('do_resize', True))

        if freeze_bn:
            self.freeze_bn()

    def set_trainable(self, trainable):
        # Toggle requires_grad on every parameter of the wrapped model.
        self.trainable = trainable
        if trainable:
            self.unfreeze()
        else:
            self.freeze()
        return self

    def set_fetch_features(self, fetch_features):
        # Attach hooks at most once; detach them all when fetching is disabled.
        self.fetch_features = fetch_features
        if fetch_features:
            if len(self.handles) == 0:
                self.attach_hooks(self.core)
        else:
            self.remove_hooks()
        return self

    def freeze(self):
        """Disable gradients for all parameters."""
        for p in self.parameters():
            p.requires_grad = False
        self.trainable = False
        return self

    def unfreeze(self):
        """Enable gradients for all parameters."""
        for p in self.parameters():
            p.requires_grad = True
        self.trainable = True
        return self

    def freeze_bn(self):
        """Put all BatchNorm2d layers in eval mode so running stats stay fixed."""
        for m in self.modules():
            if isinstance(m, nn.BatchNorm2d):
                m.eval()
        return self

    def forward(self, x, denorm=False, return_rel_depth=False):
        """Run MiDaS and return the hooked features (and optionally depth).

        Args:
            x: input image batch.
            denorm (bool): if True, undo ImageNet normalization before prep.
            return_rel_depth (bool): also return the raw MiDaS prediction.
        """
        # Preprocessing (denormalize + resize + renormalize) is never trained.
        with torch.no_grad():
            if denorm:
                x = denormalize(x)
            x = self.prep(x)
        # print("Shape after prep: ", x.shape)

        with torch.set_grad_enabled(self.trainable):

            # print("Input size to Midascore", x.shape)
            rel_depth = self.core(x)
            # print("Output from midas shape", rel_depth.shape)
            if not self.fetch_features:
                return rel_depth
        # Collect hook outputs in the order requested by layer_names.
        out = [self.core_out[k] for k in self.layer_names]

        if return_rel_depth:
            return rel_depth, out
        return out

    def get_rel_pos_params(self):
        # Yield the encoder's relative-position parameters.
        for name, p in self.core.pretrained.named_parameters():
            if "relative_position" in name:
                yield p

    def get_enc_params_except_rel_pos(self):
        # Yield all encoder parameters except the relative-position ones.
        for name, p in self.core.pretrained.named_parameters():
            if "relative_position" not in name:
                yield p

    def freeze_encoder(self, freeze_rel_pos=False):
        # Freeze the encoder; optionally keep relative-position params trainable.
        if freeze_rel_pos:
            for p in self.core.pretrained.parameters():
                p.requires_grad = False
        else:
            for p in self.get_enc_params_except_rel_pos():
                p.requires_grad = False
        return self

    def attach_hooks(self, midas):
        # Register forward hooks that copy the selected layers' outputs into
        # self.core_out. Existing hooks are removed first to avoid duplicates.
        if len(self.handles) > 0:
            self.remove_hooks()
        if "out_conv" in self.layer_names:
            # Hook the 4th child of output_conv to capture head features.
            self.handles.append(list(midas.scratch.output_conv.children())[
                                3].register_forward_hook(get_activation("out_conv", self.core_out)))
        if "r4" in self.layer_names:
            self.handles.append(midas.scratch.refinenet4.register_forward_hook(
                get_activation("r4", self.core_out)))
        if "r3" in self.layer_names:
            self.handles.append(midas.scratch.refinenet3.register_forward_hook(
                get_activation("r3", self.core_out)))
        if "r2" in self.layer_names:
            self.handles.append(midas.scratch.refinenet2.register_forward_hook(
                get_activation("r2", self.core_out)))
        if "r1" in self.layer_names:
            self.handles.append(midas.scratch.refinenet1.register_forward_hook(
                get_activation("r1", self.core_out)))
        if "l4_rn" in self.layer_names:
            self.handles.append(midas.scratch.layer4_rn.register_forward_hook(
                get_activation("l4_rn", self.core_out)))

        return self

    def remove_hooks(self):
        # Detach every registered hook.
        for h in self.handles:
            h.remove()
        return self

    def __del__(self):
        # Best-effort cleanup so hooks do not outlive the wrapper.
        self.remove_hooks()

    def set_output_channels(self, model_type):
        # Record the decoder channel counts for the chosen MiDaS variant.
        self.output_channels = MIDAS_SETTINGS[model_type]

    @staticmethod
    def build(midas_model_type="DPT_BEiT_L_384", train_midas=False, use_pretrained_midas=True, fetch_features=False, freeze_bn=True, force_keep_ar=False, force_reload=False, **kwargs):
        # Factory: load a MiDaS model from the vendored local repo via
        # torch.hub and wrap it in a configured MidasCore.
        if midas_model_type not in MIDAS_SETTINGS:
            raise ValueError(
                f"Invalid model type: {midas_model_type}. Must be one of {list(MIDAS_SETTINGS.keys())}")
        if "img_size" in kwargs:
            kwargs = MidasCore.parse_img_size(kwargs)
        img_size = kwargs.pop("img_size", [384, 384])
        print("img_size", img_size)
        midas_path = os.path.join(os.path.dirname(__file__), 'midas_repo')
        midas = torch.hub.load(midas_path, midas_model_type,
                               pretrained=use_pretrained_midas, force_reload=force_reload, source='local')
        kwargs.update({'keep_aspect_ratio': force_keep_ar})
        midas_core = MidasCore(midas, trainable=train_midas, fetch_features=fetch_features,
                               freeze_bn=freeze_bn, img_size=img_size, **kwargs)
        midas_core.set_output_channels(midas_model_type)
        return midas_core

    @staticmethod
    def build_from_config(config):
        # Convenience wrapper: unpack a config dict into build().
        return MidasCore.build(**config)

    @staticmethod
    def parse_img_size(config):
        # Normalize config['img_size'] to a [H, W] list from "H,W", int, or list.
        assert 'img_size' in config
        if isinstance(config['img_size'], str):
            assert "," in config['img_size'], "img_size should be a string with comma separated img_size=H,W"
            config['img_size'] = list(map(int, config['img_size'].split(",")))
            assert len(
                config['img_size']) == 2, "img_size should be a string with comma separated img_size=H,W"
        elif isinstance(config['img_size'], int):
            config['img_size'] = [config['img_size'], config['img_size']]
        else:
            assert isinstance(config['img_size'], list) and len(
                config['img_size']) == 2, "img_size should be a list of H,W"
        return config
369
+
370
+
371
# Decoder output-channel tuples mapped to the MiDaS variants that produce them.
nchannels2models = {
    tuple([256]*5): ["DPT_BEiT_L_384", "DPT_BEiT_L_512", "DPT_BEiT_B_384", "DPT_SwinV2_L_384", "DPT_SwinV2_B_384", "DPT_SwinV2_T_256", "DPT_Large", "DPT_Hybrid"],
    (512, 256, 128, 64, 64): ["MiDaS_small"]
}

# Model name to number of output channels
MIDAS_SETTINGS = {m: k for k, v in nchannels2models.items()
                  for m in v
                  }
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/.gitignore ADDED
@@ -0,0 +1,110 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Byte-compiled / optimized / DLL files
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+
6
+ # C extensions
7
+ *.so
8
+
9
+ # Distribution / packaging
10
+ .Python
11
+ build/
12
+ develop-eggs/
13
+ dist/
14
+ downloads/
15
+ eggs/
16
+ .eggs/
17
+ lib/
18
+ lib64/
19
+ parts/
20
+ sdist/
21
+ var/
22
+ wheels/
23
+ *.egg-info/
24
+ .installed.cfg
25
+ *.egg
26
+ MANIFEST
27
+
28
+ # PyInstaller
29
+ # Usually these files are written by a python script from a template
30
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
31
+ *.manifest
32
+ *.spec
33
+
34
+ # Installer logs
35
+ pip-log.txt
36
+ pip-delete-this-directory.txt
37
+
38
+ # Unit test / coverage reports
39
+ htmlcov/
40
+ .tox/
41
+ .coverage
42
+ .coverage.*
43
+ .cache
44
+ nosetests.xml
45
+ coverage.xml
46
+ *.cover
47
+ .hypothesis/
48
+ .pytest_cache/
49
+
50
+ # Translations
51
+ *.mo
52
+ *.pot
53
+
54
+ # Django stuff:
55
+ *.log
56
+ local_settings.py
57
+ db.sqlite3
58
+
59
+ # Flask stuff:
60
+ instance/
61
+ .webassets-cache
62
+
63
+ # Scrapy stuff:
64
+ .scrapy
65
+
66
+ # Sphinx documentation
67
+ docs/_build/
68
+
69
+ # PyBuilder
70
+ target/
71
+
72
+ # Jupyter Notebook
73
+ .ipynb_checkpoints
74
+
75
+ # pyenv
76
+ .python-version
77
+
78
+ # celery beat schedule file
79
+ celerybeat-schedule
80
+
81
+ # SageMath parsed files
82
+ *.sage.py
83
+
84
+ # Environments
85
+ .env
86
+ .venv
87
+ env/
88
+ venv/
89
+ ENV/
90
+ env.bak/
91
+ venv.bak/
92
+
93
+ # Spyder project settings
94
+ .spyderproject
95
+ .spyproject
96
+
97
+ # Rope project settings
98
+ .ropeproject
99
+
100
+ # mkdocs documentation
101
+ /site
102
+
103
+ # mypy
104
+ .mypy_cache/
105
+
106
+ *.png
107
+ *.pfm
108
+ *.jpg
109
+ *.jpeg
110
+ *.pt
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/Dockerfile ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # enables cuda support in docker
2
+ FROM nvidia/cuda:10.2-cudnn7-runtime-ubuntu18.04
3
+
4
+ # install python 3.6, pip and requirements for opencv-python
5
+ # (see https://github.com/NVIDIA/nvidia-docker/issues/864)
6
+ RUN apt-get update && apt-get -y install \
7
+ python3 \
8
+ python3-pip \
9
+ libsm6 \
10
+ libxext6 \
11
+ libxrender-dev \
12
+ curl \
13
+ && rm -rf /var/lib/apt/lists/*
14
+
15
+ # install python dependencies
16
+ RUN pip3 install --upgrade pip
17
+ RUN pip3 install torch~=1.8 torchvision opencv-python-headless~=3.4 timm
18
+
19
+ # copy inference code
20
+ WORKDIR /opt/MiDaS
21
+ COPY ./midas ./midas
22
+ COPY ./*.py ./
23
+
24
+ # download model weights so the docker image can be used offline
25
+ RUN cd weights && {curl -OL https://github.com/isl-org/MiDaS/releases/download/v3/dpt_hybrid_384.pt; cd -; }
26
+ RUN python3 run.py --model_type dpt_hybrid; exit 0
27
+
28
+ # entrypoint (dont forget to mount input and output directories)
29
+ CMD python3 run.py --model_type dpt_hybrid
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/environment.yaml ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: midas-py310
2
+ channels:
3
+ - pytorch
4
+ - defaults
5
+ dependencies:
6
+ - nvidia::cudatoolkit=11.7
7
+ - python=3.10.8
8
+ - pytorch::pytorch=1.13.0
9
+ - torchvision=0.14.0
10
+ - pip=22.3.1
11
+ - numpy=1.23.4
12
+ - pip:
13
+ - opencv-python==4.6.0.66
14
+ - imutils==0.5.4
15
+ - timm==0.6.12
16
+ - einops==0.6.0
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/ios/.gitignore ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ # ignore model file
2
+ #*.tflite
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/ios/LICENSE ADDED
@@ -0,0 +1,201 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Apache License
2
+ Version 2.0, January 2004
3
+ http://www.apache.org/licenses/
4
+
5
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
+
7
+ 1. Definitions.
8
+
9
+ "License" shall mean the terms and conditions for use, reproduction,
10
+ and distribution as defined by Sections 1 through 9 of this document.
11
+
12
+ "Licensor" shall mean the copyright owner or entity authorized by
13
+ the copyright owner that is granting the License.
14
+
15
+ "Legal Entity" shall mean the union of the acting entity and all
16
+ other entities that control, are controlled by, or are under common
17
+ control with that entity. For the purposes of this definition,
18
+ "control" means (i) the power, direct or indirect, to cause the
19
+ direction or management of such entity, whether by contract or
20
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
21
+ outstanding shares, or (iii) beneficial ownership of such entity.
22
+
23
+ "You" (or "Your") shall mean an individual or Legal Entity
24
+ exercising permissions granted by this License.
25
+
26
+ "Source" form shall mean the preferred form for making modifications,
27
+ including but not limited to software source code, documentation
28
+ source, and configuration files.
29
+
30
+ "Object" form shall mean any form resulting from mechanical
31
+ transformation or translation of a Source form, including but
32
+ not limited to compiled object code, generated documentation,
33
+ and conversions to other media types.
34
+
35
+ "Work" shall mean the work of authorship, whether in Source or
36
+ Object form, made available under the License, as indicated by a
37
+ copyright notice that is included in or attached to the work
38
+ (an example is provided in the Appendix below).
39
+
40
+ "Derivative Works" shall mean any work, whether in Source or Object
41
+ form, that is based on (or derived from) the Work and for which the
42
+ editorial revisions, annotations, elaborations, or other modifications
43
+ represent, as a whole, an original work of authorship. For the purposes
44
+ of this License, Derivative Works shall not include works that remain
45
+ separable from, or merely link (or bind by name) to the interfaces of,
46
+ the Work and Derivative Works thereof.
47
+
48
+ "Contribution" shall mean any work of authorship, including
49
+ the original version of the Work and any modifications or additions
50
+ to that Work or Derivative Works thereof, that is intentionally
51
+ submitted to Licensor for inclusion in the Work by the copyright owner
52
+ or by an individual or Legal Entity authorized to submit on behalf of
53
+ the copyright owner. For the purposes of this definition, "submitted"
54
+ means any form of electronic, verbal, or written communication sent
55
+ to the Licensor or its representatives, including but not limited to
56
+ communication on electronic mailing lists, source code control systems,
57
+ and issue tracking systems that are managed by, or on behalf of, the
58
+ Licensor for the purpose of discussing and improving the Work, but
59
+ excluding communication that is conspicuously marked or otherwise
60
+ designated in writing by the copyright owner as "Not a Contribution."
61
+
62
+ "Contributor" shall mean Licensor and any individual or Legal Entity
63
+ on behalf of whom a Contribution has been received by Licensor and
64
+ subsequently incorporated within the Work.
65
+
66
+ 2. Grant of Copyright License. Subject to the terms and conditions of
67
+ this License, each Contributor hereby grants to You a perpetual,
68
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69
+ copyright license to reproduce, prepare Derivative Works of,
70
+ publicly display, publicly perform, sublicense, and distribute the
71
+ Work and such Derivative Works in Source or Object form.
72
+
73
+ 3. Grant of Patent License. Subject to the terms and conditions of
74
+ this License, each Contributor hereby grants to You a perpetual,
75
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76
+ (except as stated in this section) patent license to make, have made,
77
+ use, offer to sell, sell, import, and otherwise transfer the Work,
78
+ where such license applies only to those patent claims licensable
79
+ by such Contributor that are necessarily infringed by their
80
+ Contribution(s) alone or by combination of their Contribution(s)
81
+ with the Work to which such Contribution(s) was submitted. If You
82
+ institute patent litigation against any entity (including a
83
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
84
+ or a Contribution incorporated within the Work constitutes direct
85
+ or contributory patent infringement, then any patent licenses
86
+ granted to You under this License for that Work shall terminate
87
+ as of the date such litigation is filed.
88
+
89
+ 4. Redistribution. You may reproduce and distribute copies of the
90
+ Work or Derivative Works thereof in any medium, with or without
91
+ modifications, and in Source or Object form, provided that You
92
+ meet the following conditions:
93
+
94
+ (a) You must give any other recipients of the Work or
95
+ Derivative Works a copy of this License; and
96
+
97
+ (b) You must cause any modified files to carry prominent notices
98
+ stating that You changed the files; and
99
+
100
+ (c) You must retain, in the Source form of any Derivative Works
101
+ that You distribute, all copyright, patent, trademark, and
102
+ attribution notices from the Source form of the Work,
103
+ excluding those notices that do not pertain to any part of
104
+ the Derivative Works; and
105
+
106
+ (d) If the Work includes a "NOTICE" text file as part of its
107
+ distribution, then any Derivative Works that You distribute must
108
+ include a readable copy of the attribution notices contained
109
+ within such NOTICE file, excluding those notices that do not
110
+ pertain to any part of the Derivative Works, in at least one
111
+ of the following places: within a NOTICE text file distributed
112
+ as part of the Derivative Works; within the Source form or
113
+ documentation, if provided along with the Derivative Works; or,
114
+ within a display generated by the Derivative Works, if and
115
+ wherever such third-party notices normally appear. The contents
116
+ of the NOTICE file are for informational purposes only and
117
+ do not modify the License. You may add Your own attribution
118
+ notices within Derivative Works that You distribute, alongside
119
+ or as an addendum to the NOTICE text from the Work, provided
120
+ that such additional attribution notices cannot be construed
121
+ as modifying the License.
122
+
123
+ You may add Your own copyright statement to Your modifications and
124
+ may provide additional or different license terms and conditions
125
+ for use, reproduction, or distribution of Your modifications, or
126
+ for any such Derivative Works as a whole, provided Your use,
127
+ reproduction, and distribution of the Work otherwise complies with
128
+ the conditions stated in this License.
129
+
130
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
131
+ any Contribution intentionally submitted for inclusion in the Work
132
+ by You to the Licensor shall be under the terms and conditions of
133
+ this License, without any additional terms or conditions.
134
+ Notwithstanding the above, nothing herein shall supersede or modify
135
+ the terms of any separate license agreement you may have executed
136
+ with Licensor regarding such Contributions.
137
+
138
+ 6. Trademarks. This License does not grant permission to use the trade
139
+ names, trademarks, service marks, or product names of the Licensor,
140
+ except as required for reasonable and customary use in describing the
141
+ origin of the Work and reproducing the content of the NOTICE file.
142
+
143
+ 7. Disclaimer of Warranty. Unless required by applicable law or
144
+ agreed to in writing, Licensor provides the Work (and each
145
+ Contributor provides its Contributions) on an "AS IS" BASIS,
146
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147
+ implied, including, without limitation, any warranties or conditions
148
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149
+ PARTICULAR PURPOSE. You are solely responsible for determining the
150
+ appropriateness of using or redistributing the Work and assume any
151
+ risks associated with Your exercise of permissions under this License.
152
+
153
+ 8. Limitation of Liability. In no event and under no legal theory,
154
+ whether in tort (including negligence), contract, or otherwise,
155
+ unless required by applicable law (such as deliberate and grossly
156
+ negligent acts) or agreed to in writing, shall any Contributor be
157
+ liable to You for damages, including any direct, indirect, special,
158
+ incidental, or consequential damages of any character arising as a
159
+ result of this License or out of the use or inability to use the
160
+ Work (including but not limited to damages for loss of goodwill,
161
+ work stoppage, computer failure or malfunction, or any and all
162
+ other commercial damages or losses), even if such Contributor
163
+ has been advised of the possibility of such damages.
164
+
165
+ 9. Accepting Warranty or Additional Liability. While redistributing
166
+ the Work or Derivative Works thereof, You may choose to offer,
167
+ and charge a fee for, acceptance of support, warranty, indemnity,
168
+ or other liability obligations and/or rights consistent with this
169
+ License. However, in accepting such obligations, You may act only
170
+ on Your own behalf and on Your sole responsibility, not on behalf
171
+ of any other Contributor, and only if You agree to indemnify,
172
+ defend, and hold each Contributor harmless for any liability
173
+ incurred by, or claims asserted against, such Contributor by reason
174
+ of your accepting any such warranty or additional liability.
175
+
176
+ END OF TERMS AND CONDITIONS
177
+
178
+ APPENDIX: How to apply the Apache License to your work.
179
+
180
+ To apply the Apache License to your work, attach the following
181
+ boilerplate notice, with the fields enclosed by brackets "[]"
182
+ replaced with your own identifying information. (Don't include
183
+ the brackets!) The text should be enclosed in the appropriate
184
+ comment syntax for the file format. We also recommend that a
185
+ file or class name and description of purpose be included on the
186
+ same "printed page" as the copyright notice for easier
187
+ identification within third-party archives.
188
+
189
+ Copyright [yyyy] [name of copyright owner]
190
+
191
+ Licensed under the Apache License, Version 2.0 (the "License");
192
+ you may not use this file except in compliance with the License.
193
+ You may obtain a copy of the License at
194
+
195
+ http://www.apache.org/licenses/LICENSE-2.0
196
+
197
+ Unless required by applicable law or agreed to in writing, software
198
+ distributed under the License is distributed on an "AS IS" BASIS,
199
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200
+ See the License for the specific language governing permissions and
201
+ limitations under the License.
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/ios/Midas/AppDelegate.swift ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2
+ //
3
+ // Licensed under the Apache License, Version 2.0 (the "License");
4
+ // you may not use this file except in compliance with the License.
5
+ // You may obtain a copy of the License at
6
+ //
7
+ // http://www.apache.org/licenses/LICENSE-2.0
8
+ //
9
+ // Unless required by applicable law or agreed to in writing, software
10
+ // distributed under the License is distributed on an "AS IS" BASIS,
11
+ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ // See the License for the specific language governing permissions and
13
+ // limitations under the License.
14
+
15
+ import UIKit
16
+
17
+ @UIApplicationMain
18
+ class AppDelegate: UIResponder, UIApplicationDelegate {
19
+
20
+ var window: UIWindow?
21
+
22
+ func application(_ application: UIApplication, didFinishLaunchingWithOptions launchOptions: [UIApplication.LaunchOptionsKey: Any]?) -> Bool {
23
+ return true
24
+ }
25
+
26
+ func applicationWillResignActive(_ application: UIApplication) {
27
+ }
28
+
29
+ func applicationDidEnterBackground(_ application: UIApplication) {
30
+ }
31
+
32
+ func applicationWillEnterForeground(_ application: UIApplication) {
33
+ }
34
+
35
+ func applicationDidBecomeActive(_ application: UIApplication) {
36
+ }
37
+
38
+ func applicationWillTerminate(_ application: UIApplication) {
39
+ }
40
+ }
41
+
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/ios/Midas/Assets.xcassets/AppIcon.appiconset/Contents.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"images":[{"size":"60x60","expected-size":"180","filename":"180.png","folder":"Assets.xcassets/AppIcon.appiconset/","idiom":"iphone","scale":"3x"},{"size":"40x40","expected-size":"80","filename":"80.png","folder":"Assets.xcassets/AppIcon.appiconset/","idiom":"iphone","scale":"2x"},{"size":"40x40","expected-size":"120","filename":"120.png","folder":"Assets.xcassets/AppIcon.appiconset/","idiom":"iphone","scale":"3x"},{"size":"60x60","expected-size":"120","filename":"120.png","folder":"Assets.xcassets/AppIcon.appiconset/","idiom":"iphone","scale":"2x"},{"size":"57x57","expected-size":"57","filename":"57.png","folder":"Assets.xcassets/AppIcon.appiconset/","idiom":"iphone","scale":"1x"},{"size":"29x29","expected-size":"58","filename":"58.png","folder":"Assets.xcassets/AppIcon.appiconset/","idiom":"iphone","scale":"2x"},{"size":"29x29","expected-size":"29","filename":"29.png","folder":"Assets.xcassets/AppIcon.appiconset/","idiom":"iphone","scale":"1x"},{"size":"29x29","expected-size":"87","filename":"87.png","folder":"Assets.xcassets/AppIcon.appiconset/","idiom":"iphone","scale":"3x"},{"size":"57x57","expected-size":"114","filename":"114.png","folder":"Assets.xcassets/AppIcon.appiconset/","idiom":"iphone","scale":"2x"},{"size":"20x20","expected-size":"40","filename":"40.png","folder":"Assets.xcassets/AppIcon.appiconset/","idiom":"iphone","scale":"2x"},{"size":"20x20","expected-size":"60","filename":"60.png","folder":"Assets.xcassets/AppIcon.appiconset/","idiom":"iphone","scale":"3x"},{"size":"1024x1024","filename":"1024.png","expected-size":"1024","idiom":"ios-marketing","folder":"Assets.xcassets/AppIcon.appiconset/","scale":"1x"},{"size":"40x40","expected-size":"80","filename":"80.png","folder":"Assets.xcassets/AppIcon.appiconset/","idiom":"ipad","scale":"2x"},{"size":"72x72","expected-size":"72","filename":"72.png","folder":"Assets.xcassets/AppIcon.appiconset/","idiom":"ipad","scale":"1x"},{"size":"76x76","expected-size":"152","filename":"152.png","folder":"Assets.xc
assets/AppIcon.appiconset/","idiom":"ipad","scale":"2x"},{"size":"50x50","expected-size":"100","filename":"100.png","folder":"Assets.xcassets/AppIcon.appiconset/","idiom":"ipad","scale":"2x"},{"size":"29x29","expected-size":"58","filename":"58.png","folder":"Assets.xcassets/AppIcon.appiconset/","idiom":"ipad","scale":"2x"},{"size":"76x76","expected-size":"76","filename":"76.png","folder":"Assets.xcassets/AppIcon.appiconset/","idiom":"ipad","scale":"1x"},{"size":"29x29","expected-size":"29","filename":"29.png","folder":"Assets.xcassets/AppIcon.appiconset/","idiom":"ipad","scale":"1x"},{"size":"50x50","expected-size":"50","filename":"50.png","folder":"Assets.xcassets/AppIcon.appiconset/","idiom":"ipad","scale":"1x"},{"size":"72x72","expected-size":"144","filename":"144.png","folder":"Assets.xcassets/AppIcon.appiconset/","idiom":"ipad","scale":"2x"},{"size":"40x40","expected-size":"40","filename":"40.png","folder":"Assets.xcassets/AppIcon.appiconset/","idiom":"ipad","scale":"1x"},{"size":"83.5x83.5","expected-size":"167","filename":"167.png","folder":"Assets.xcassets/AppIcon.appiconset/","idiom":"ipad","scale":"2x"},{"size":"20x20","expected-size":"20","filename":"20.png","folder":"Assets.xcassets/AppIcon.appiconset/","idiom":"ipad","scale":"1x"},{"size":"20x20","expected-size":"40","filename":"40.png","folder":"Assets.xcassets/AppIcon.appiconset/","idiom":"ipad","scale":"2x"}]}
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/ios/Midas/Assets.xcassets/Contents.json ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "info" : {
3
+ "version" : 1,
4
+ "author" : "xcode"
5
+ }
6
+ }
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/ios/Midas/Camera Feed/CameraFeedManager.swift ADDED
@@ -0,0 +1,316 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2
+ //
3
+ // Licensed under the Apache License, Version 2.0 (the "License");
4
+ // you may not use this file except in compliance with the License.
5
+ // You may obtain a copy of the License at
6
+ //
7
+ // http://www.apache.org/licenses/LICENSE-2.0
8
+ //
9
+ // Unless required by applicable law or agreed to in writing, software
10
+ // distributed under the License is distributed on an "AS IS" BASIS,
11
+ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ // See the License for the specific language governing permissions and
13
+ // limitations under the License.
14
+
15
+ import AVFoundation
16
+ import UIKit
17
+ import os
18
+
19
+ // MARK: - CameraFeedManagerDelegate Declaration
20
+ @objc protocol CameraFeedManagerDelegate: class {
21
+ /// This method delivers the pixel buffer of the current frame seen by the device's camera.
22
+ @objc optional func cameraFeedManager(
23
+ _ manager: CameraFeedManager, didOutput pixelBuffer: CVPixelBuffer
24
+ )
25
+
26
+ /// This method initimates that a session runtime error occured.
27
+ func cameraFeedManagerDidEncounterSessionRunTimeError(_ manager: CameraFeedManager)
28
+
29
+ /// This method initimates that the session was interrupted.
30
+ func cameraFeedManager(
31
+ _ manager: CameraFeedManager, sessionWasInterrupted canResumeManually: Bool
32
+ )
33
+
34
+ /// This method initimates that the session interruption has ended.
35
+ func cameraFeedManagerDidEndSessionInterruption(_ manager: CameraFeedManager)
36
+
37
+ /// This method initimates that there was an error in video configurtion.
38
+ func presentVideoConfigurationErrorAlert(_ manager: CameraFeedManager)
39
+
40
+ /// This method initimates that the camera permissions have been denied.
41
+ func presentCameraPermissionsDeniedAlert(_ manager: CameraFeedManager)
42
+ }
43
+
44
+ /// This enum holds the state of the camera initialization.
45
+ // MARK: - Camera Initialization State Enum
46
+ enum CameraConfiguration {
47
+ case success
48
+ case failed
49
+ case permissionDenied
50
+ }
51
+
52
+ /// This class manages all camera related functionalities.
53
+ // MARK: - Camera Related Functionalies Manager
54
+ class CameraFeedManager: NSObject {
55
+ // MARK: Camera Related Instance Variables
56
+ private let session: AVCaptureSession = AVCaptureSession()
57
+
58
+ private let previewView: PreviewView
59
+ private let sessionQueue = DispatchQueue(label: "sessionQueue")
60
+ private var cameraConfiguration: CameraConfiguration = .failed
61
+ private lazy var videoDataOutput = AVCaptureVideoDataOutput()
62
+ private var isSessionRunning = false
63
+
64
+ // MARK: CameraFeedManagerDelegate
65
+ weak var delegate: CameraFeedManagerDelegate?
66
+
67
+ // MARK: Initializer
68
+ init(previewView: PreviewView) {
69
+ self.previewView = previewView
70
+ super.init()
71
+
72
+ // Initializes the session
73
+ session.sessionPreset = .high
74
+ self.previewView.session = session
75
+ self.previewView.previewLayer.connection?.videoOrientation = .portrait
76
+ self.previewView.previewLayer.videoGravity = .resizeAspectFill
77
+ self.attemptToConfigureSession()
78
+ }
79
+
80
+ // MARK: Session Start and End methods
81
+
82
+ /// This method starts an AVCaptureSession based on whether the camera configuration was successful.
83
+ func checkCameraConfigurationAndStartSession() {
84
+ sessionQueue.async {
85
+ switch self.cameraConfiguration {
86
+ case .success:
87
+ self.addObservers()
88
+ self.startSession()
89
+ case .failed:
90
+ DispatchQueue.main.async {
91
+ self.delegate?.presentVideoConfigurationErrorAlert(self)
92
+ }
93
+ case .permissionDenied:
94
+ DispatchQueue.main.async {
95
+ self.delegate?.presentCameraPermissionsDeniedAlert(self)
96
+ }
97
+ }
98
+ }
99
+ }
100
+
101
+ /// This method stops a running an AVCaptureSession.
102
+ func stopSession() {
103
+ self.removeObservers()
104
+ sessionQueue.async {
105
+ if self.session.isRunning {
106
+ self.session.stopRunning()
107
+ self.isSessionRunning = self.session.isRunning
108
+ }
109
+ }
110
+
111
+ }
112
+
113
+ /// This method resumes an interrupted AVCaptureSession.
114
+ func resumeInterruptedSession(withCompletion completion: @escaping (Bool) -> Void) {
115
+ sessionQueue.async {
116
+ self.startSession()
117
+
118
+ DispatchQueue.main.async {
119
+ completion(self.isSessionRunning)
120
+ }
121
+ }
122
+ }
123
+
124
+ /// This method starts the AVCaptureSession
125
+ private func startSession() {
126
+ self.session.startRunning()
127
+ self.isSessionRunning = self.session.isRunning
128
+ }
129
+
130
+ // MARK: Session Configuration Methods.
131
+ /// This method requests for camera permissions and handles the configuration of the session and stores the result of configuration.
132
+ private func attemptToConfigureSession() {
133
+ switch AVCaptureDevice.authorizationStatus(for: .video) {
134
+ case .authorized:
135
+ self.cameraConfiguration = .success
136
+ case .notDetermined:
137
+ self.sessionQueue.suspend()
138
+ self.requestCameraAccess(completion: { granted in
139
+ self.sessionQueue.resume()
140
+ })
141
+ case .denied:
142
+ self.cameraConfiguration = .permissionDenied
143
+ default:
144
+ break
145
+ }
146
+
147
+ self.sessionQueue.async {
148
+ self.configureSession()
149
+ }
150
+ }
151
+
152
+ /// This method requests for camera permissions.
153
+ private func requestCameraAccess(completion: @escaping (Bool) -> Void) {
154
+ AVCaptureDevice.requestAccess(for: .video) { (granted) in
155
+ if !granted {
156
+ self.cameraConfiguration = .permissionDenied
157
+ } else {
158
+ self.cameraConfiguration = .success
159
+ }
160
+ completion(granted)
161
+ }
162
+ }
163
+
164
+ /// This method handles all the steps to configure an AVCaptureSession.
165
+ private func configureSession() {
166
+ guard cameraConfiguration == .success else {
167
+ return
168
+ }
169
+ session.beginConfiguration()
170
+
171
+ // Tries to add an AVCaptureDeviceInput.
172
+ guard addVideoDeviceInput() == true else {
173
+ self.session.commitConfiguration()
174
+ self.cameraConfiguration = .failed
175
+ return
176
+ }
177
+
178
+ // Tries to add an AVCaptureVideoDataOutput.
179
+ guard addVideoDataOutput() else {
180
+ self.session.commitConfiguration()
181
+ self.cameraConfiguration = .failed
182
+ return
183
+ }
184
+
185
+ session.commitConfiguration()
186
+ self.cameraConfiguration = .success
187
+ }
188
+
189
+ /// This method tries to an AVCaptureDeviceInput to the current AVCaptureSession.
190
+ private func addVideoDeviceInput() -> Bool {
191
+ /// Tries to get the default back camera.
192
+ guard
193
+ let camera = AVCaptureDevice.default(.builtInWideAngleCamera, for: .video, position: .back)
194
+ else {
195
+ fatalError("Cannot find camera")
196
+ }
197
+
198
+ do {
199
+ let videoDeviceInput = try AVCaptureDeviceInput(device: camera)
200
+ if session.canAddInput(videoDeviceInput) {
201
+ session.addInput(videoDeviceInput)
202
+ return true
203
+ } else {
204
+ return false
205
+ }
206
+ } catch {
207
+ fatalError("Cannot create video device input")
208
+ }
209
+ }
210
+
211
+ /// This method tries to an AVCaptureVideoDataOutput to the current AVCaptureSession.
212
+ private func addVideoDataOutput() -> Bool {
213
+ let sampleBufferQueue = DispatchQueue(label: "sampleBufferQueue")
214
+ videoDataOutput.setSampleBufferDelegate(self, queue: sampleBufferQueue)
215
+ videoDataOutput.alwaysDiscardsLateVideoFrames = true
216
+ videoDataOutput.videoSettings = [
217
+ String(kCVPixelBufferPixelFormatTypeKey): kCMPixelFormat_32BGRA
218
+ ]
219
+
220
+ if session.canAddOutput(videoDataOutput) {
221
+ session.addOutput(videoDataOutput)
222
+ videoDataOutput.connection(with: .video)?.videoOrientation = .portrait
223
+ return true
224
+ }
225
+ return false
226
+ }
227
+
228
+ // MARK: Notification Observer Handling
229
+ private func addObservers() {
230
+ NotificationCenter.default.addObserver(
231
+ self, selector: #selector(CameraFeedManager.sessionRuntimeErrorOccured(notification:)),
232
+ name: NSNotification.Name.AVCaptureSessionRuntimeError, object: session)
233
+ NotificationCenter.default.addObserver(
234
+ self, selector: #selector(CameraFeedManager.sessionWasInterrupted(notification:)),
235
+ name: NSNotification.Name.AVCaptureSessionWasInterrupted, object: session)
236
+ NotificationCenter.default.addObserver(
237
+ self, selector: #selector(CameraFeedManager.sessionInterruptionEnded),
238
+ name: NSNotification.Name.AVCaptureSessionInterruptionEnded, object: session)
239
+ }
240
+
241
+ private func removeObservers() {
242
+ NotificationCenter.default.removeObserver(
243
+ self, name: NSNotification.Name.AVCaptureSessionRuntimeError, object: session)
244
+ NotificationCenter.default.removeObserver(
245
+ self, name: NSNotification.Name.AVCaptureSessionWasInterrupted, object: session)
246
+ NotificationCenter.default.removeObserver(
247
+ self, name: NSNotification.Name.AVCaptureSessionInterruptionEnded, object: session)
248
+ }
249
+
250
+ // MARK: Notification Observers
251
+ @objc func sessionWasInterrupted(notification: Notification) {
252
+ if let userInfoValue = notification.userInfo?[AVCaptureSessionInterruptionReasonKey]
253
+ as AnyObject?,
254
+ let reasonIntegerValue = userInfoValue.integerValue,
255
+ let reason = AVCaptureSession.InterruptionReason(rawValue: reasonIntegerValue)
256
+ {
257
+ os_log("Capture session was interrupted with reason: %s", type: .error, reason.rawValue)
258
+
259
+ var canResumeManually = false
260
+ if reason == .videoDeviceInUseByAnotherClient {
261
+ canResumeManually = true
262
+ } else if reason == .videoDeviceNotAvailableWithMultipleForegroundApps {
263
+ canResumeManually = false
264
+ }
265
+
266
+ delegate?.cameraFeedManager(self, sessionWasInterrupted: canResumeManually)
267
+
268
+ }
269
+ }
270
+
271
+ @objc func sessionInterruptionEnded(notification: Notification) {
272
+ delegate?.cameraFeedManagerDidEndSessionInterruption(self)
273
+ }
274
+
275
+ @objc func sessionRuntimeErrorOccured(notification: Notification) {
276
+ guard let error = notification.userInfo?[AVCaptureSessionErrorKey] as? AVError else {
277
+ return
278
+ }
279
+
280
+ os_log("Capture session runtime error: %s", type: .error, error.localizedDescription)
281
+
282
+ if error.code == .mediaServicesWereReset {
283
+ sessionQueue.async {
284
+ if self.isSessionRunning {
285
+ self.startSession()
286
+ } else {
287
+ DispatchQueue.main.async {
288
+ self.delegate?.cameraFeedManagerDidEncounterSessionRunTimeError(self)
289
+ }
290
+ }
291
+ }
292
+ } else {
293
+ delegate?.cameraFeedManagerDidEncounterSessionRunTimeError(self)
294
+ }
295
+ }
296
+ }
297
+
298
+ /// AVCaptureVideoDataOutputSampleBufferDelegate
299
+ extension CameraFeedManager: AVCaptureVideoDataOutputSampleBufferDelegate {
300
+ /// This method delegates the CVPixelBuffer of the frame seen by the camera currently.
301
+ func captureOutput(
302
+ _ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer,
303
+ from connection: AVCaptureConnection
304
+ ) {
305
+
306
+ // Converts the CMSampleBuffer to a CVPixelBuffer.
307
+ let pixelBuffer: CVPixelBuffer? = CMSampleBufferGetImageBuffer(sampleBuffer)
308
+
309
+ guard let imagePixelBuffer = pixelBuffer else {
310
+ return
311
+ }
312
+
313
+ // Delegates the pixel buffer to the ViewController.
314
+ delegate?.cameraFeedManager?(self, didOutput: imagePixelBuffer)
315
+ }
316
+ }
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/ios/Midas/Camera Feed/PreviewView.swift ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2
+ //
3
+ // Licensed under the Apache License, Version 2.0 (the "License");
4
+ // you may not use this file except in compliance with the License.
5
+ // You may obtain a copy of the License at
6
+ //
7
+ // http://www.apache.org/licenses/LICENSE-2.0
8
+ //
9
+ // Unless required by applicable law or agreed to in writing, software
10
+ // distributed under the License is distributed on an "AS IS" BASIS,
11
+ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ // See the License for the specific language governing permissions and
13
+ // limitations under the License.
14
+
15
+ import UIKit
16
+ import AVFoundation
17
+
18
+ /// The camera frame is displayed on this view.
19
+ class PreviewView: UIView {
20
+ var previewLayer: AVCaptureVideoPreviewLayer {
21
+ guard let layer = layer as? AVCaptureVideoPreviewLayer else {
22
+ fatalError("Layer expected is of type VideoPreviewLayer")
23
+ }
24
+ return layer
25
+ }
26
+
27
+ var session: AVCaptureSession? {
28
+ get {
29
+ return previewLayer.session
30
+ }
31
+ set {
32
+ previewLayer.session = newValue
33
+ }
34
+ }
35
+
36
+ override class var layerClass: AnyClass {
37
+ return AVCaptureVideoPreviewLayer.self
38
+ }
39
+ }
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/ios/Midas/Cells/InfoCell.swift ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2
+ //
3
+ // Licensed under the Apache License, Version 2.0 (the "License");
4
+ // you may not use this file except in compliance with the License.
5
+ // You may obtain a copy of the License at
6
+ //
7
+ // http://www.apache.org/licenses/LICENSE-2.0
8
+ //
9
+ // Unless required by applicable law or agreed to in writing, software
10
+ // distributed under the License is distributed on an "AS IS" BASIS,
11
+ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ // See the License for the specific language governing permissions and
13
+ // limitations under the License.
14
+
15
+ import UIKit
16
+
17
+ /// Table cell for inference result in bottom view.
18
+ class InfoCell: UITableViewCell {
19
+ @IBOutlet weak var fieldNameLabel: UILabel!
20
+ @IBOutlet weak var infoLabel: UILabel!
21
+ }
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/ios/Midas/Constants.swift ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright 2020 The TensorFlow Authors. All Rights Reserved.
2
+ //
3
+ // Licensed under the Apache License, Version 2.0 (the "License");
4
+ // you may not use this file except in compliance with the License.
5
+ // You may obtain a copy of the License at
6
+ //
7
+ // http://www.apache.org/licenses/LICENSE-2.0
8
+ //
9
+ // Unless required by applicable law or agreed to in writing, software
10
+ // distributed under the License is distributed on an "AS IS" BASIS,
11
+ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ // See the License for the specific language governing permissions and
13
+ // limitations under the License.
14
+ // =============================================================================
15
+
16
+ enum Constants {
17
+ // MARK: - Constants related to the image processing
18
+ static let bgraPixel = (channels: 4, alphaComponent: 3, lastBgrComponent: 2)
19
+ static let rgbPixelChannels = 3
20
+ static let maxRGBValue: Float32 = 255.0
21
+
22
+ // MARK: - Constants related to the model interperter
23
+ static let defaultThreadCount = 2
24
+ static let defaultDelegate: Delegates = .CPU
25
+ }
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/ios/Midas/Extensions/CGSizeExtension.swift ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2
+ //
3
+ // Licensed under the Apache License, Version 2.0 (the "License");
4
+ // you may not use this file except in compliance with the License.
5
+ // You may obtain a copy of the License at
6
+ //
7
+ // http://www.apache.org/licenses/LICENSE-2.0
8
+ //
9
+ // Unless required by applicable law or agreed to in writing, software
10
+ // distributed under the License is distributed on an "AS IS" BASIS,
11
+ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ // See the License for the specific language governing permissions and
13
+ // limitations under the License.
14
+ // =============================================================================
15
+
16
+ import Accelerate
17
+ import Foundation
18
+
19
+ extension CGSize {
20
+ /// Returns `CGAfineTransform` to resize `self` to fit in destination size, keeping aspect ratio
21
+ /// of `self`. `self` image is resized to be inscribe to destination size and located in center of
22
+ /// destination.
23
+ ///
24
+ /// - Parameter toFitIn: destination size to be filled.
25
+ /// - Returns: `CGAffineTransform` to transform `self` image to `dest` image.
26
+ func transformKeepAspect(toFitIn dest: CGSize) -> CGAffineTransform {
27
+ let sourceRatio = self.height / self.width
28
+ let destRatio = dest.height / dest.width
29
+
30
+ // Calculates ratio `self` to `dest`.
31
+ var ratio: CGFloat
32
+ var x: CGFloat = 0
33
+ var y: CGFloat = 0
34
+ if sourceRatio > destRatio {
35
+ // Source size is taller than destination. Resized to fit in destination height, and find
36
+ // horizontal starting point to be centered.
37
+ ratio = dest.height / self.height
38
+ x = (dest.width - self.width * ratio) / 2
39
+ } else {
40
+ ratio = dest.width / self.width
41
+ y = (dest.height - self.height * ratio) / 2
42
+ }
43
+ return CGAffineTransform(a: ratio, b: 0, c: 0, d: ratio, tx: x, ty: y)
44
+ }
45
+ }
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/ios/Midas/Extensions/CVPixelBufferExtension.swift ADDED
@@ -0,0 +1,172 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2
+ //
3
+ // Licensed under the Apache License, Version 2.0 (the "License");
4
+ // you may not use this file except in compliance with the License.
5
+ // You may obtain a copy of the License at
6
+ //
7
+ // http://www.apache.org/licenses/LICENSE-2.0
8
+ //
9
+ // Unless required by applicable law or agreed to in writing, software
10
+ // distributed under the License is distributed on an "AS IS" BASIS,
11
+ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ // See the License for the specific language governing permissions and
13
+ // limitations under the License.
14
+ // =============================================================================
15
+
16
+ import Accelerate
17
+ import Foundation
18
+
19
+ extension CVPixelBuffer {
20
+ var size: CGSize {
21
+ return CGSize(width: CVPixelBufferGetWidth(self), height: CVPixelBufferGetHeight(self))
22
+ }
23
+
24
+ /// Returns a new `CVPixelBuffer` created by taking the self area and resizing it to the
25
+ /// specified target size. Aspect ratios of source image and destination image are expected to be
26
+ /// same.
27
+ ///
28
+ /// - Parameters:
29
+ /// - from: Source area of image to be cropped and resized.
30
+ /// - to: Size to scale the image to(i.e. image size used while training the model).
31
+ /// - Returns: The cropped and resized image of itself.
32
+ func resize(from source: CGRect, to size: CGSize) -> CVPixelBuffer? {
33
+ let rect = CGRect(origin: CGPoint(x: 0, y: 0), size: self.size)
34
+ guard rect.contains(source) else {
35
+ os_log("Resizing Error: source area is out of index", type: .error)
36
+ return nil
37
+ }
38
+ guard rect.size.width / rect.size.height - source.size.width / source.size.height < 1e-5
39
+ else {
40
+ os_log(
41
+ "Resizing Error: source image ratio and destination image ratio is different",
42
+ type: .error)
43
+ return nil
44
+ }
45
+
46
+ let inputImageRowBytes = CVPixelBufferGetBytesPerRow(self)
47
+ let imageChannels = 4
48
+
49
+ CVPixelBufferLockBaseAddress(self, CVPixelBufferLockFlags(rawValue: 0))
50
+ defer { CVPixelBufferUnlockBaseAddress(self, CVPixelBufferLockFlags(rawValue: 0)) }
51
+
52
+ // Finds the address of the upper leftmost pixel of the source area.
53
+ guard
54
+ let inputBaseAddress = CVPixelBufferGetBaseAddress(self)?.advanced(
55
+ by: Int(source.minY) * inputImageRowBytes + Int(source.minX) * imageChannels)
56
+ else {
57
+ return nil
58
+ }
59
+
60
+ // Crops given area as vImage Buffer.
61
+ var croppedImage = vImage_Buffer(
62
+ data: inputBaseAddress, height: UInt(source.height), width: UInt(source.width),
63
+ rowBytes: inputImageRowBytes)
64
+
65
+ let resultRowBytes = Int(size.width) * imageChannels
66
+ guard let resultAddress = malloc(Int(size.height) * resultRowBytes) else {
67
+ return nil
68
+ }
69
+
70
+ // Allocates a vacant vImage buffer for resized image.
71
+ var resizedImage = vImage_Buffer(
72
+ data: resultAddress,
73
+ height: UInt(size.height), width: UInt(size.width),
74
+ rowBytes: resultRowBytes
75
+ )
76
+
77
+ // Performs the scale operation on cropped image and stores it in result image buffer.
78
+ guard vImageScale_ARGB8888(&croppedImage, &resizedImage, nil, vImage_Flags(0)) == kvImageNoError
79
+ else {
80
+ return nil
81
+ }
82
+
83
+ let releaseCallBack: CVPixelBufferReleaseBytesCallback = { mutablePointer, pointer in
84
+ if let pointer = pointer {
85
+ free(UnsafeMutableRawPointer(mutating: pointer))
86
+ }
87
+ }
88
+
89
+ var result: CVPixelBuffer?
90
+
91
+ // Converts the thumbnail vImage buffer to CVPixelBuffer
92
+ let conversionStatus = CVPixelBufferCreateWithBytes(
93
+ nil,
94
+ Int(size.width), Int(size.height),
95
+ CVPixelBufferGetPixelFormatType(self),
96
+ resultAddress,
97
+ resultRowBytes,
98
+ releaseCallBack,
99
+ nil,
100
+ nil,
101
+ &result
102
+ )
103
+
104
+ guard conversionStatus == kCVReturnSuccess else {
105
+ free(resultAddress)
106
+ return nil
107
+ }
108
+
109
+ return result
110
+ }
111
+
112
+ /// Returns the RGB `Data` representation of the given image buffer.
113
+ ///
114
+ /// - Parameters:
115
+ /// - isModelQuantized: Whether the model is quantized (i.e. fixed point values rather than
116
+ /// floating point values).
117
+ /// - Returns: The RGB data representation of the image buffer or `nil` if the buffer could not be
118
+ /// converted.
119
+ func rgbData(
120
+ isModelQuantized: Bool
121
+ ) -> Data? {
122
+ CVPixelBufferLockBaseAddress(self, .readOnly)
123
+ defer { CVPixelBufferUnlockBaseAddress(self, .readOnly) }
124
+ guard let sourceData = CVPixelBufferGetBaseAddress(self) else {
125
+ return nil
126
+ }
127
+
128
+ let width = CVPixelBufferGetWidth(self)
129
+ let height = CVPixelBufferGetHeight(self)
130
+ let sourceBytesPerRow = CVPixelBufferGetBytesPerRow(self)
131
+ let destinationBytesPerRow = Constants.rgbPixelChannels * width
132
+
133
+ // Assign input image to `sourceBuffer` to convert it.
134
+ var sourceBuffer = vImage_Buffer(
135
+ data: sourceData,
136
+ height: vImagePixelCount(height),
137
+ width: vImagePixelCount(width),
138
+ rowBytes: sourceBytesPerRow)
139
+
140
+ // Make `destinationBuffer` and `destinationData` for its data to be assigned.
141
+ guard let destinationData = malloc(height * destinationBytesPerRow) else {
142
+ os_log("Error: out of memory", type: .error)
143
+ return nil
144
+ }
145
+ defer { free(destinationData) }
146
+ var destinationBuffer = vImage_Buffer(
147
+ data: destinationData,
148
+ height: vImagePixelCount(height),
149
+ width: vImagePixelCount(width),
150
+ rowBytes: destinationBytesPerRow)
151
+
152
+ // Convert image type.
153
+ switch CVPixelBufferGetPixelFormatType(self) {
154
+ case kCVPixelFormatType_32BGRA:
155
+ vImageConvert_BGRA8888toRGB888(&sourceBuffer, &destinationBuffer, UInt32(kvImageNoFlags))
156
+ case kCVPixelFormatType_32ARGB:
157
+ vImageConvert_BGRA8888toRGB888(&sourceBuffer, &destinationBuffer, UInt32(kvImageNoFlags))
158
+ default:
159
+ os_log("The type of this image is not supported.", type: .error)
160
+ return nil
161
+ }
162
+
163
+ // Make `Data` with converted image.
164
+ let imageByteData = Data(
165
+ bytes: destinationBuffer.data, count: destinationBuffer.rowBytes * height)
166
+
167
+ if isModelQuantized { return imageByteData }
168
+
169
+ let imageBytes = [UInt8](imageByteData)
170
+ return Data(copyingBufferOf: imageBytes.map { Float($0) / Constants.maxRGBValue })
171
+ }
172
+ }
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/ios/Midas/Extensions/TFLiteExtension.swift ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2
+ //
3
+ // Licensed under the Apache License, Version 2.0 (the "License");
4
+ // you may not use this file except in compliance with the License.
5
+ // You may obtain a copy of the License at
6
+ //
7
+ // http://www.apache.org/licenses/LICENSE-2.0
8
+ //
9
+ // Unless required by applicable law or agreed to in writing, software
10
+ // distributed under the License is distributed on an "AS IS" BASIS,
11
+ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ // See the License for the specific language governing permissions and
13
+ // limitations under the License.
14
+ // =============================================================================
15
+
16
+ import Accelerate
17
+ import CoreImage
18
+ import Foundation
19
+ import TensorFlowLite
20
+
21
+ // MARK: - Data
22
+ extension Data {
23
+ /// Creates a new buffer by copying the buffer pointer of the given array.
24
+ ///
25
+ /// - Warning: The given array's element type `T` must be trivial in that it can be copied bit
26
+ /// for bit with no indirection or reference-counting operations; otherwise, reinterpreting
27
+ /// data from the resulting buffer has undefined behavior.
28
+ /// - Parameter array: An array with elements of type `T`.
29
+ init<T>(copyingBufferOf array: [T]) {
30
+ self = array.withUnsafeBufferPointer(Data.init)
31
+ }
32
+
33
+ /// Convert a Data instance to Array representation.
34
+ func toArray<T>(type: T.Type) -> [T] where T: AdditiveArithmetic {
35
+ var array = [T](repeating: T.zero, count: self.count / MemoryLayout<T>.stride)
36
+ _ = array.withUnsafeMutableBytes { self.copyBytes(to: $0) }
37
+ return array
38
+ }
39
+ }
40
+
41
+ // MARK: - Wrappers
42
+ /// Struct for handling multidimension `Data` in flat `Array`.
43
+ struct FlatArray<Element: AdditiveArithmetic> {
44
+ private var array: [Element]
45
+ var dimensions: [Int]
46
+
47
+ init(tensor: Tensor) {
48
+ dimensions = tensor.shape.dimensions
49
+ array = tensor.data.toArray(type: Element.self)
50
+ }
51
+
52
+ private func flatIndex(_ index: [Int]) -> Int {
53
+ guard index.count == dimensions.count else {
54
+ fatalError("Invalid index: got \(index.count) index(es) for \(dimensions.count) index(es).")
55
+ }
56
+
57
+ var result = 0
58
+ for i in 0..<dimensions.count {
59
+ guard dimensions[i] > index[i] else {
60
+ fatalError("Invalid index: \(index[i]) is bigger than \(dimensions[i])")
61
+ }
62
+ result = dimensions[i] * result + index[i]
63
+ }
64
+ return result
65
+ }
66
+
67
+ subscript(_ index: Int...) -> Element {
68
+ get {
69
+ return array[flatIndex(index)]
70
+ }
71
+ set(newValue) {
72
+ array[flatIndex(index)] = newValue
73
+ }
74
+ }
75
+ }
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/ios/Midas/Info.plist ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <?xml version="1.0" encoding="UTF-8"?>
2
+ <!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
3
+ <plist version="1.0">
4
+ <dict>
5
+ <key>CFBundleDevelopmentRegion</key>
6
+ <string>$(DEVELOPMENT_LANGUAGE)</string>
7
+ <key>CFBundleExecutable</key>
8
+ <string>$(EXECUTABLE_NAME)</string>
9
+ <key>CFBundleIdentifier</key>
10
+ <string>$(PRODUCT_BUNDLE_IDENTIFIER)</string>
11
+ <key>CFBundleInfoDictionaryVersion</key>
12
+ <string>6.0</string>
13
+ <key>CFBundleName</key>
14
+ <string>$(PRODUCT_NAME)</string>
15
+ <key>CFBundlePackageType</key>
16
+ <string>APPL</string>
17
+ <key>CFBundleShortVersionString</key>
18
+ <string>1.0</string>
19
+ <key>CFBundleVersion</key>
20
+ <string>1</string>
21
+ <key>LSRequiresIPhoneOS</key>
22
+ <true/>
23
+ <key>NSCameraUsageDescription</key>
24
+ <string>This app will use camera to continuously estimate the depth map.</string>
25
+ <key>UILaunchStoryboardName</key>
26
+ <string>LaunchScreen</string>
27
+ <key>UIMainStoryboardFile</key>
28
+ <string>Main</string>
29
+ <key>UIRequiredDeviceCapabilities</key>
30
+ <array>
31
+ <string>armv7</string>
32
+ </array>
33
+ <key>UISupportedInterfaceOrientations</key>
34
+ <array>
35
+ <string>UIInterfaceOrientationPortrait</string>
36
+ </array>
37
+ <key>UISupportedInterfaceOrientations~ipad</key>
38
+ <array>
39
+ <string>UIInterfaceOrientationPortrait</string>
40
+ </array>
41
+ </dict>
42
+ </plist>
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/ios/Midas/ModelDataHandler/ModelDataHandler.swift ADDED
@@ -0,0 +1,464 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2
+ //
3
+ // Licensed under the Apache License, Version 2.0 (the "License");
4
+ // you may not use this file except in compliance with the License.
5
+ // You may obtain a copy of the License at
6
+ //
7
+ // http://www.apache.org/licenses/LICENSE-2.0
8
+ //
9
+ // Unless required by applicable law or agreed to in writing, software
10
+ // distributed under the License is distributed on an "AS IS" BASIS,
11
+ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ // See the License for the specific language governing permissions and
13
+ // limitations under the License.
14
+
15
+ import Accelerate
16
+ import CoreImage
17
+ import Foundation
18
+ import TensorFlowLite
19
+ import UIKit
20
+
21
+ /// This class handles all data preprocessing and makes calls to run inference on a given frame
22
+ /// by invoking the `Interpreter`. It then formats the inferences obtained.
23
+ class ModelDataHandler {
24
+ // MARK: - Private Properties
25
+
26
+ /// TensorFlow Lite `Interpreter` object for performing inference on a given model.
27
+ private var interpreter: Interpreter
28
+
29
+ /// TensorFlow lite `Tensor` of model input and output.
30
+ private var inputTensor: Tensor
31
+
32
+ //private var heatsTensor: Tensor
33
+ //private var offsetsTensor: Tensor
34
+ private var outputTensor: Tensor
35
+ // MARK: - Initialization
36
+
37
+ /// A failable initializer for `ModelDataHandler`. A new instance is created if the model is
38
+ /// successfully loaded from the app's main bundle. Default `threadCount` is 2.
39
+ init(
40
+ threadCount: Int = Constants.defaultThreadCount,
41
+ delegate: Delegates = Constants.defaultDelegate
42
+ ) throws {
43
+ // Construct the path to the model file.
44
+ guard
45
+ let modelPath = Bundle.main.path(
46
+ forResource: Model.file.name,
47
+ ofType: Model.file.extension
48
+ )
49
+ else {
50
+ fatalError("Failed to load the model file with name: \(Model.file.name).")
51
+ }
52
+
53
+ // Specify the options for the `Interpreter`.
54
+ var options = Interpreter.Options()
55
+ options.threadCount = threadCount
56
+
57
+ // Specify the delegates for the `Interpreter`.
58
+ var delegates: [Delegate]?
59
+ switch delegate {
60
+ case .Metal:
61
+ delegates = [MetalDelegate()]
62
+ case .CoreML:
63
+ if let coreMLDelegate = CoreMLDelegate() {
64
+ delegates = [coreMLDelegate]
65
+ } else {
66
+ delegates = nil
67
+ }
68
+ default:
69
+ delegates = nil
70
+ }
71
+
72
+ // Create the `Interpreter`.
73
+ interpreter = try Interpreter(modelPath: modelPath, options: options, delegates: delegates)
74
+
75
+ // Initialize input and output `Tensor`s.
76
+ // Allocate memory for the model's input `Tensor`s.
77
+ try interpreter.allocateTensors()
78
+
79
+ // Get allocated input and output `Tensor`s.
80
+ inputTensor = try interpreter.input(at: 0)
81
+ outputTensor = try interpreter.output(at: 0)
82
+ //heatsTensor = try interpreter.output(at: 0)
83
+ //offsetsTensor = try interpreter.output(at: 1)
84
+
85
+ /*
86
+ // Check if input and output `Tensor`s are in the expected formats.
87
+ guard (inputTensor.dataType == .uInt8) == Model.isQuantized else {
88
+ fatalError("Unexpected Model: quantization is \(!Model.isQuantized)")
89
+ }
90
+
91
+ guard inputTensor.shape.dimensions[0] == Model.input.batchSize,
92
+ inputTensor.shape.dimensions[1] == Model.input.height,
93
+ inputTensor.shape.dimensions[2] == Model.input.width,
94
+ inputTensor.shape.dimensions[3] == Model.input.channelSize
95
+ else {
96
+ fatalError("Unexpected Model: input shape")
97
+ }
98
+
99
+
100
+ guard heatsTensor.shape.dimensions[0] == Model.output.batchSize,
101
+ heatsTensor.shape.dimensions[1] == Model.output.height,
102
+ heatsTensor.shape.dimensions[2] == Model.output.width,
103
+ heatsTensor.shape.dimensions[3] == Model.output.keypointSize
104
+ else {
105
+ fatalError("Unexpected Model: heat tensor")
106
+ }
107
+
108
+ guard offsetsTensor.shape.dimensions[0] == Model.output.batchSize,
109
+ offsetsTensor.shape.dimensions[1] == Model.output.height,
110
+ offsetsTensor.shape.dimensions[2] == Model.output.width,
111
+ offsetsTensor.shape.dimensions[3] == Model.output.offsetSize
112
+ else {
113
+ fatalError("Unexpected Model: offset tensor")
114
+ }
115
+ */
116
+
117
+ }
118
+
119
+ /// Runs Midas model with given image with given source area to destination area.
120
+ ///
121
+ /// - Parameters:
122
+ /// - on: Input image to run the model.
123
+ /// - from: Range of input image to run the model.
124
+ /// - to: Size of view to render the result.
125
+ /// - Returns: Result of the inference and the times consumed in every steps.
126
+ func runMidas(on pixelbuffer: CVPixelBuffer, from source: CGRect, to dest: CGSize)
127
+ //-> (Result, Times)?
128
+ //-> (FlatArray<Float32>, Times)?
129
+ -> ([Float], Int, Int, Times)?
130
+ {
131
+ // Start times of each process.
132
+ let preprocessingStartTime: Date
133
+ let inferenceStartTime: Date
134
+ let postprocessingStartTime: Date
135
+
136
+ // Processing times in miliseconds.
137
+ let preprocessingTime: TimeInterval
138
+ let inferenceTime: TimeInterval
139
+ let postprocessingTime: TimeInterval
140
+
141
+ preprocessingStartTime = Date()
142
+ guard let data = preprocess(of: pixelbuffer, from: source) else {
143
+ os_log("Preprocessing failed", type: .error)
144
+ return nil
145
+ }
146
+ preprocessingTime = Date().timeIntervalSince(preprocessingStartTime) * 1000
147
+
148
+ inferenceStartTime = Date()
149
+ inference(from: data)
150
+ inferenceTime = Date().timeIntervalSince(inferenceStartTime) * 1000
151
+
152
+ postprocessingStartTime = Date()
153
+ //guard let result = postprocess(to: dest) else {
154
+ // os_log("Postprocessing failed", type: .error)
155
+ // return nil
156
+ //}
157
+ postprocessingTime = Date().timeIntervalSince(postprocessingStartTime) * 1000
158
+
159
+
160
+ let results: [Float]
161
+ switch outputTensor.dataType {
162
+ case .uInt8:
163
+ guard let quantization = outputTensor.quantizationParameters else {
164
+ print("No results returned because the quantization values for the output tensor are nil.")
165
+ return nil
166
+ }
167
+ let quantizedResults = [UInt8](outputTensor.data)
168
+ results = quantizedResults.map {
169
+ quantization.scale * Float(Int($0) - quantization.zeroPoint)
170
+ }
171
+ case .float32:
172
+ results = [Float32](unsafeData: outputTensor.data) ?? []
173
+ default:
174
+ print("Output tensor data type \(outputTensor.dataType) is unsupported for this example app.")
175
+ return nil
176
+ }
177
+
178
+
179
+ let times = Times(
180
+ preprocessing: preprocessingTime,
181
+ inference: inferenceTime,
182
+ postprocessing: postprocessingTime)
183
+
184
+ return (results, Model.input.width, Model.input.height, times)
185
+ }
186
+
187
+ // MARK: - Private functions to run model
188
+ /// Preprocesses given rectangle image to be `Data` of disired size by croping and resizing it.
189
+ ///
190
+ /// - Parameters:
191
+ /// - of: Input image to crop and resize.
192
+ /// - from: Target area to be cropped and resized.
193
+ /// - Returns: The cropped and resized image. `nil` if it can not be processed.
194
+ private func preprocess(of pixelBuffer: CVPixelBuffer, from targetSquare: CGRect) -> Data? {
195
+ let sourcePixelFormat = CVPixelBufferGetPixelFormatType(pixelBuffer)
196
+ assert(sourcePixelFormat == kCVPixelFormatType_32BGRA)
197
+
198
+ // Resize `targetSquare` of input image to `modelSize`.
199
+ let modelSize = CGSize(width: Model.input.width, height: Model.input.height)
200
+ guard let thumbnail = pixelBuffer.resize(from: targetSquare, to: modelSize)
201
+ else {
202
+ return nil
203
+ }
204
+
205
+ // Remove the alpha component from the image buffer to get the initialized `Data`.
206
+ let byteCount =
207
+ Model.input.batchSize
208
+ * Model.input.height * Model.input.width
209
+ * Model.input.channelSize
210
+ guard
211
+ let inputData = thumbnail.rgbData(
212
+ isModelQuantized: Model.isQuantized
213
+ )
214
+ else {
215
+ os_log("Failed to convert the image buffer to RGB data.", type: .error)
216
+ return nil
217
+ }
218
+
219
+ return inputData
220
+ }
221
+
222
+
223
+
224
+ /*
225
+ /// Postprocesses output `Tensor`s to `Result` with size of view to render the result.
226
+ ///
227
+ /// - Parameters:
228
+ /// - to: Size of view to be displaied.
229
+ /// - Returns: Postprocessed `Result`. `nil` if it can not be processed.
230
+ private func postprocess(to viewSize: CGSize) -> Result? {
231
+ // MARK: Formats output tensors
232
+ // Convert `Tensor` to `FlatArray`. As Midas is not quantized, convert them to Float type
233
+ // `FlatArray`.
234
+ let heats = FlatArray<Float32>(tensor: heatsTensor)
235
+ let offsets = FlatArray<Float32>(tensor: offsetsTensor)
236
+
237
+ // MARK: Find position of each key point
238
+ // Finds the (row, col) locations of where the keypoints are most likely to be. The highest
239
+ // `heats[0, row, col, keypoint]` value, the more likely `keypoint` being located in (`row`,
240
+ // `col`).
241
+ let keypointPositions = (0..<Model.output.keypointSize).map { keypoint -> (Int, Int) in
242
+ var maxValue = heats[0, 0, 0, keypoint]
243
+ var maxRow = 0
244
+ var maxCol = 0
245
+ for row in 0..<Model.output.height {
246
+ for col in 0..<Model.output.width {
247
+ if heats[0, row, col, keypoint] > maxValue {
248
+ maxValue = heats[0, row, col, keypoint]
249
+ maxRow = row
250
+ maxCol = col
251
+ }
252
+ }
253
+ }
254
+ return (maxRow, maxCol)
255
+ }
256
+
257
+ // MARK: Calculates total confidence score
258
+ // Calculates total confidence score of each key position.
259
+ let totalScoreSum = keypointPositions.enumerated().reduce(0.0) { accumulator, elem -> Float32 in
260
+ accumulator + sigmoid(heats[0, elem.element.0, elem.element.1, elem.offset])
261
+ }
262
+ let totalScore = totalScoreSum / Float32(Model.output.keypointSize)
263
+
264
+ // MARK: Calculate key point position on model input
265
+ // Calculates `KeyPoint` coordination model input image with `offsets` adjustment.
266
+ let coords = keypointPositions.enumerated().map { index, elem -> (y: Float32, x: Float32) in
267
+ let (y, x) = elem
268
+ let yCoord =
269
+ Float32(y) / Float32(Model.output.height - 1) * Float32(Model.input.height)
270
+ + offsets[0, y, x, index]
271
+ let xCoord =
272
+ Float32(x) / Float32(Model.output.width - 1) * Float32(Model.input.width)
273
+ + offsets[0, y, x, index + Model.output.keypointSize]
274
+ return (y: yCoord, x: xCoord)
275
+ }
276
+
277
+ // MARK: Transform key point position and make lines
278
+ // Make `Result` from `keypointPosition'. Each point is adjusted to `ViewSize` to be drawn.
279
+ var result = Result(dots: [], lines: [], score: totalScore)
280
+ var bodyPartToDotMap = [BodyPart: CGPoint]()
281
+ for (index, part) in BodyPart.allCases.enumerated() {
282
+ let position = CGPoint(
283
+ x: CGFloat(coords[index].x) * viewSize.width / CGFloat(Model.input.width),
284
+ y: CGFloat(coords[index].y) * viewSize.height / CGFloat(Model.input.height)
285
+ )
286
+ bodyPartToDotMap[part] = position
287
+ result.dots.append(position)
288
+ }
289
+
290
+ do {
291
+ try result.lines = BodyPart.lines.map { map throws -> Line in
292
+ guard let from = bodyPartToDotMap[map.from] else {
293
+ throw PostprocessError.missingBodyPart(of: map.from)
294
+ }
295
+ guard let to = bodyPartToDotMap[map.to] else {
296
+ throw PostprocessError.missingBodyPart(of: map.to)
297
+ }
298
+ return Line(from: from, to: to)
299
+ }
300
+ } catch PostprocessError.missingBodyPart(let missingPart) {
301
+ os_log("Postprocessing error: %s is missing.", type: .error, missingPart.rawValue)
302
+ return nil
303
+ } catch {
304
+ os_log("Postprocessing error: %s", type: .error, error.localizedDescription)
305
+ return nil
306
+ }
307
+
308
+ return result
309
+ }
310
+ */
311
+
312
+
313
+
314
+ /// Run inference with given `Data`
315
+ ///
316
+ /// Parameter `from`: `Data` of input image to run model.
317
+ private func inference(from data: Data) {
318
+ // Copy the initialized `Data` to the input `Tensor`.
319
+ do {
320
+ try interpreter.copy(data, toInputAt: 0)
321
+
322
+ // Run inference by invoking the `Interpreter`.
323
+ try interpreter.invoke()
324
+
325
+ // Get the output `Tensor` to process the inference results.
326
+ outputTensor = try interpreter.output(at: 0)
327
+ //heatsTensor = try interpreter.output(at: 0)
328
+ //offsetsTensor = try interpreter.output(at: 1)
329
+
330
+
331
+ } catch let error {
332
+ os_log(
333
+ "Failed to invoke the interpreter with error: %s", type: .error,
334
+ error.localizedDescription)
335
+ return
336
+ }
337
+ }
338
+
339
+ /// Returns value within [0,1].
340
+ private func sigmoid(_ x: Float32) -> Float32 {
341
+ return (1.0 / (1.0 + exp(-x)))
342
+ }
343
+ }
344
+
345
+ // MARK: - Data types for inference result
346
+ struct KeyPoint {
347
+ var bodyPart: BodyPart = BodyPart.NOSE
348
+ var position: CGPoint = CGPoint()
349
+ var score: Float = 0.0
350
+ }
351
+
352
+ struct Line {
353
+ let from: CGPoint
354
+ let to: CGPoint
355
+ }
356
+
357
+ struct Times {
358
+ var preprocessing: Double
359
+ var inference: Double
360
+ var postprocessing: Double
361
+ }
362
+
363
+ struct Result {
364
+ var dots: [CGPoint]
365
+ var lines: [Line]
366
+ var score: Float
367
+ }
368
+
369
+ enum BodyPart: String, CaseIterable {
370
+ case NOSE = "nose"
371
+ case LEFT_EYE = "left eye"
372
+ case RIGHT_EYE = "right eye"
373
+ case LEFT_EAR = "left ear"
374
+ case RIGHT_EAR = "right ear"
375
+ case LEFT_SHOULDER = "left shoulder"
376
+ case RIGHT_SHOULDER = "right shoulder"
377
+ case LEFT_ELBOW = "left elbow"
378
+ case RIGHT_ELBOW = "right elbow"
379
+ case LEFT_WRIST = "left wrist"
380
+ case RIGHT_WRIST = "right wrist"
381
+ case LEFT_HIP = "left hip"
382
+ case RIGHT_HIP = "right hip"
383
+ case LEFT_KNEE = "left knee"
384
+ case RIGHT_KNEE = "right knee"
385
+ case LEFT_ANKLE = "left ankle"
386
+ case RIGHT_ANKLE = "right ankle"
387
+
388
+ /// List of lines connecting each part.
389
+ static let lines = [
390
+ (from: BodyPart.LEFT_WRIST, to: BodyPart.LEFT_ELBOW),
391
+ (from: BodyPart.LEFT_ELBOW, to: BodyPart.LEFT_SHOULDER),
392
+ (from: BodyPart.LEFT_SHOULDER, to: BodyPart.RIGHT_SHOULDER),
393
+ (from: BodyPart.RIGHT_SHOULDER, to: BodyPart.RIGHT_ELBOW),
394
+ (from: BodyPart.RIGHT_ELBOW, to: BodyPart.RIGHT_WRIST),
395
+ (from: BodyPart.LEFT_SHOULDER, to: BodyPart.LEFT_HIP),
396
+ (from: BodyPart.LEFT_HIP, to: BodyPart.RIGHT_HIP),
397
+ (from: BodyPart.RIGHT_HIP, to: BodyPart.RIGHT_SHOULDER),
398
+ (from: BodyPart.LEFT_HIP, to: BodyPart.LEFT_KNEE),
399
+ (from: BodyPart.LEFT_KNEE, to: BodyPart.LEFT_ANKLE),
400
+ (from: BodyPart.RIGHT_HIP, to: BodyPart.RIGHT_KNEE),
401
+ (from: BodyPart.RIGHT_KNEE, to: BodyPart.RIGHT_ANKLE),
402
+ ]
403
+ }
404
+
405
+ // MARK: - Delegates Enum
406
+ enum Delegates: Int, CaseIterable {
407
+ case CPU
408
+ case Metal
409
+ case CoreML
410
+
411
+ var description: String {
412
+ switch self {
413
+ case .CPU:
414
+ return "CPU"
415
+ case .Metal:
416
+ return "GPU"
417
+ case .CoreML:
418
+ return "NPU"
419
+ }
420
+ }
421
+ }
422
+
423
+ // MARK: - Custom Errors
424
+ enum PostprocessError: Error {
425
+ case missingBodyPart(of: BodyPart)
426
+ }
427
+
428
+ // MARK: - Information about the model file.
429
+ typealias FileInfo = (name: String, extension: String)
430
+
431
+ enum Model {
432
+ static let file: FileInfo = (
433
+ name: "model_opt", extension: "tflite"
434
+ )
435
+
436
+ static let input = (batchSize: 1, height: 256, width: 256, channelSize: 3)
437
+ static let output = (batchSize: 1, height: 256, width: 256, channelSize: 1)
438
+ static let isQuantized = false
439
+ }
440
+
441
+
442
+ extension Array {
443
+ /// Creates a new array from the bytes of the given unsafe data.
444
+ ///
445
+ /// - Warning: The array's `Element` type must be trivial in that it can be copied bit for bit
446
+ /// with no indirection or reference-counting operations; otherwise, copying the raw bytes in
447
+ /// the `unsafeData`'s buffer to a new array returns an unsafe copy.
448
+ /// - Note: Returns `nil` if `unsafeData.count` is not a multiple of
449
+ /// `MemoryLayout<Element>.stride`.
450
+ /// - Parameter unsafeData: The data containing the bytes to turn into an array.
451
+ init?(unsafeData: Data) {
452
+ guard unsafeData.count % MemoryLayout<Element>.stride == 0 else { return nil }
453
+ #if swift(>=5.0)
454
+ self = unsafeData.withUnsafeBytes { .init($0.bindMemory(to: Element.self)) }
455
+ #else
456
+ self = unsafeData.withUnsafeBytes {
457
+ .init(UnsafeBufferPointer<Element>(
458
+ start: $0,
459
+ count: unsafeData.count / MemoryLayout<Element>.stride
460
+ ))
461
+ }
462
+ #endif // swift(>=5.0)
463
+ }
464
+ }
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/ios/Midas/Storyboards/Base.lproj/Launch Screen.storyboard ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <?xml version="1.0" encoding="UTF-8" standalone="no"?>
2
+ <document type="com.apple.InterfaceBuilder3.CocoaTouch.Storyboard.XIB" version="3.0" toolsVersion="13142" targetRuntime="iOS.CocoaTouch" propertyAccessControl="none" useAutolayout="YES" launchScreen="YES" useTraitCollections="YES" useSafeAreas="YES" colorMatched="YES" initialViewController="01J-lp-oVM">
3
+ <dependencies>
4
+ <plugIn identifier="com.apple.InterfaceBuilder.IBCocoaTouchPlugin" version="12042"/>
5
+ <capability name="Constraints with non-1.0 multipliers" minToolsVersion="5.1"/>
6
+ <capability name="Safe area layout guides" minToolsVersion="9.0"/>
7
+ <capability name="documents saved in the Xcode 8 format" minToolsVersion="8.0"/>
8
+ </dependencies>
9
+ <scenes>
10
+ <!--View Controller-->
11
+ <scene sceneID="EHf-IW-A2E">
12
+ <objects>
13
+ <viewController id="01J-lp-oVM" sceneMemberID="viewController">
14
+ <view key="view" contentMode="scaleToFill" id="Ze5-6b-2t3">
15
+ <rect key="frame" x="0.0" y="0.0" width="375" height="667"/>
16
+ <autoresizingMask key="autoresizingMask" widthSizable="YES" heightSizable="YES"/>
17
+ <subviews>
18
+ <label opaque="NO" clipsSubviews="YES" userInteractionEnabled="NO" contentMode="left" horizontalHuggingPriority="251" verticalHuggingPriority="251" text="Copyright © 2019 tensorflow. All rights reserved." textAlignment="center" lineBreakMode="tailTruncation" baselineAdjustment="alignBaselines" minimumFontSize="9" translatesAutoresizingMaskIntoConstraints="NO" id="obG-Y5-kRd">
19
+ <rect key="frame" x="0.0" y="626.5" width="375" height="20.5"/>
20
+ <fontDescription key="fontDescription" type="system" pointSize="17"/>
21
+ <color key="textColor" red="0.0" green="0.0" blue="0.0" alpha="1" colorSpace="custom" customColorSpace="sRGB"/>
22
+ <nil key="highlightedColor"/>
23
+ </label>
24
+ <label opaque="NO" clipsSubviews="YES" userInteractionEnabled="NO" contentMode="left" horizontalHuggingPriority="251" verticalHuggingPriority="251" text="Midas" textAlignment="center" lineBreakMode="middleTruncation" baselineAdjustment="alignBaselines" minimumFontSize="18" translatesAutoresizingMaskIntoConstraints="NO" id="GJd-Yh-RWb">
25
+ <rect key="frame" x="0.0" y="202" width="375" height="43"/>
26
+ <fontDescription key="fontDescription" type="boldSystem" pointSize="36"/>
27
+ <color key="textColor" red="0.0" green="0.0" blue="0.0" alpha="1" colorSpace="custom" customColorSpace="sRGB"/>
28
+ <nil key="highlightedColor"/>
29
+ </label>
30
+ </subviews>
31
+ <color key="backgroundColor" red="1" green="1" blue="1" alpha="1" colorSpace="custom" customColorSpace="sRGB"/>
32
+ <constraints>
33
+ <constraint firstItem="Bcu-3y-fUS" firstAttribute="centerX" secondItem="obG-Y5-kRd" secondAttribute="centerX" id="5cz-MP-9tL"/>
34
+ <constraint firstItem="Bcu-3y-fUS" firstAttribute="centerX" secondItem="GJd-Yh-RWb" secondAttribute="centerX" id="Q3B-4B-g5h"/>
35
+ <constraint firstItem="obG-Y5-kRd" firstAttribute="leading" secondItem="Bcu-3y-fUS" secondAttribute="leading" constant="20" symbolic="YES" id="SfN-ll-jLj"/>
36
+ <constraint firstAttribute="bottom" secondItem="obG-Y5-kRd" secondAttribute="bottom" constant="20" id="Y44-ml-fuU"/>
37
+ <constraint firstItem="GJd-Yh-RWb" firstAttribute="centerY" secondItem="Ze5-6b-2t3" secondAttribute="bottom" multiplier="1/3" constant="1" id="moa-c2-u7t"/>
38
+ <constraint firstItem="GJd-Yh-RWb" firstAttribute="leading" secondItem="Bcu-3y-fUS" secondAttribute="leading" constant="20" symbolic="YES" id="x7j-FC-K8j"/>
39
+ </constraints>
40
+ <viewLayoutGuide key="safeArea" id="Bcu-3y-fUS"/>
41
+ </view>
42
+ </viewController>
43
+ <placeholder placeholderIdentifier="IBFirstResponder" id="iYj-Kq-Ea1" userLabel="First Responder" sceneMemberID="firstResponder"/>
44
+ </objects>
45
+ <point key="canvasLocation" x="53" y="375"/>
46
+ </scene>
47
+ </scenes>
48
+ </document>
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/ios/Midas/Storyboards/Base.lproj/Main.storyboard ADDED
@@ -0,0 +1,236 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <?xml version="1.0" encoding="UTF-8"?>
2
+ <document type="com.apple.InterfaceBuilder3.CocoaTouch.Storyboard.XIB" version="3.0" toolsVersion="15505" targetRuntime="iOS.CocoaTouch" propertyAccessControl="none" useAutolayout="YES" useTraitCollections="YES" useSafeAreas="YES" colorMatched="YES" initialViewController="BYZ-38-t0r">
3
+ <device id="retina6_1" orientation="portrait" appearance="light"/>
4
+ <dependencies>
5
+ <deployment identifier="iOS"/>
6
+ <plugIn identifier="com.apple.InterfaceBuilder.IBCocoaTouchPlugin" version="15510"/>
7
+ <capability name="Safe area layout guides" minToolsVersion="9.0"/>
8
+ <capability name="documents saved in the Xcode 8 format" minToolsVersion="8.0"/>
9
+ </dependencies>
10
+ <scenes>
11
+ <!--View Controller-->
12
+ <scene sceneID="tne-QT-ifu">
13
+ <objects>
14
+ <viewController id="BYZ-38-t0r" customClass="ViewController" customModule="Midas" customModuleProvider="target" sceneMemberID="viewController">
15
+ <view key="view" contentMode="scaleToFill" id="8bC-Xf-vdC">
16
+ <rect key="frame" x="0.0" y="0.0" width="414" height="896"/>
17
+ <autoresizingMask key="autoresizingMask" widthSizable="YES" heightSizable="YES"/>
18
+ <subviews>
19
+ <view contentMode="scaleToFill" translatesAutoresizingMaskIntoConstraints="NO" id="aEU-Ov-crs" customClass="PreviewView" customModule="Midas" customModuleProvider="target">
20
+ <rect key="frame" x="0.0" y="0.0" width="414" height="896"/>
21
+ <subviews>
22
+ <button hidden="YES" opaque="NO" contentMode="scaleToFill" contentHorizontalAlignment="center" contentVerticalAlignment="center" buttonType="roundedRect" lineBreakMode="middleTruncation" translatesAutoresizingMaskIntoConstraints="NO" id="JeV-bW-Ogb">
23
+ <rect key="frame" x="150.5" y="433" width="113" height="30"/>
24
+ <state key="normal" title="Resume Session"/>
25
+ <connections>
26
+ <action selector="didTapResumeButton:" destination="BYZ-38-t0r" eventType="touchUpInside" id="eGE-XF-oGN"/>
27
+ </connections>
28
+ </button>
29
+ <label hidden="YES" opaque="NO" userInteractionEnabled="NO" contentMode="left" horizontalHuggingPriority="251" verticalHuggingPriority="251" text="Camera Unavailable" textAlignment="center" lineBreakMode="tailTruncation" baselineAdjustment="alignBaselines" adjustsFontSizeToFit="NO" translatesAutoresizingMaskIntoConstraints="NO" id="xja-Am-Oc5">
30
+ <rect key="frame" x="111.5" y="371.5" width="191" height="26.5"/>
31
+ <fontDescription key="fontDescription" type="system" pointSize="22"/>
32
+ <nil key="textColor"/>
33
+ <nil key="highlightedColor"/>
34
+ </label>
35
+ <view contentMode="scaleToFill" insetsLayoutMarginsFromSafeArea="NO" translatesAutoresizingMaskIntoConstraints="NO" id="tWv-pt-h17" userLabel="TopView">
36
+ <rect key="frame" x="0.0" y="0.0" width="414" height="119"/>
37
+ <subviews>
38
+ <imageView clipsSubviews="YES" userInteractionEnabled="NO" contentMode="scaleAspectFit" horizontalHuggingPriority="251" verticalHuggingPriority="251" image="tfl_logo.png" translatesAutoresizingMaskIntoConstraints="NO" id="ijS-RU-qlo">
39
+ <rect key="frame" x="60" y="42.5" width="294" height="68"/>
40
+ </imageView>
41
+ </subviews>
42
+ <color key="backgroundColor" white="0.0" alpha="0.80000000000000004" colorSpace="custom" customColorSpace="genericGamma22GrayColorSpace"/>
43
+ <constraints>
44
+ <constraint firstItem="ijS-RU-qlo" firstAttribute="bottom" secondItem="tWv-pt-h17" secondAttribute="bottom" multiplier="6.5:7" id="06z-Bq-BW1"/>
45
+ <constraint firstItem="ijS-RU-qlo" firstAttribute="centerX" secondItem="tWv-pt-h17" secondAttribute="centerX" id="DjM-bj-7Aa"/>
46
+ <constraint firstItem="ijS-RU-qlo" firstAttribute="height" secondItem="tWv-pt-h17" secondAttribute="height" multiplier="4:7" id="fU4-ZD-b2i"/>
47
+ </constraints>
48
+ </view>
49
+ <view opaque="NO" contentMode="scaleAspectFit" translatesAutoresizingMaskIntoConstraints="NO" id="FLM-9o-o4W" customClass="OverlayView" customModule="Midas" customModuleProvider="target">
50
+ <rect key="frame" x="0.0" y="119" width="414" height="414"/>
51
+ <constraints>
52
+ <constraint firstAttribute="width" secondItem="FLM-9o-o4W" secondAttribute="height" multiplier="1:1" id="1T9-Bs-Jec"/>
53
+ </constraints>
54
+ <edgeInsets key="layoutMargins" top="8" left="8" bottom="8" right="8"/>
55
+ </view>
56
+ <view contentMode="scaleToFill" translatesAutoresizingMaskIntoConstraints="NO" id="nFg-A5-Dew" userLabel="BottomView">
57
+ <rect key="frame" x="0.0" y="533" width="414" height="363"/>
58
+ <subviews>
59
+ <tableView clipsSubviews="YES" userInteractionEnabled="NO" contentMode="scaleToFill" bounces="NO" scrollEnabled="NO" delaysContentTouches="NO" canCancelContentTouches="NO" bouncesZoom="NO" dataMode="prototypes" style="plain" separatorStyle="none" allowsMultipleSelection="YES" rowHeight="-1" estimatedRowHeight="-1" sectionHeaderHeight="28" sectionFooterHeight="28" translatesAutoresizingMaskIntoConstraints="NO" id="3e4-S8-Ouh">
60
+ <rect key="frame" x="60" y="30" width="294" height="163"/>
61
+ <color key="backgroundColor" white="0.0" alpha="0.0" colorSpace="custom" customColorSpace="genericGamma22GrayColorSpace"/>
62
+ <color key="tintColor" white="1" alpha="1" colorSpace="custom" customColorSpace="genericGamma22GrayColorSpace"/>
63
+ <color key="separatorColor" white="0.0" alpha="0.0" colorSpace="custom" customColorSpace="genericGamma22GrayColorSpace"/>
64
+ <color key="sectionIndexColor" white="1" alpha="1" colorSpace="custom" customColorSpace="genericGamma22GrayColorSpace"/>
65
+ <color key="sectionIndexBackgroundColor" white="0.0" alpha="0.0" colorSpace="custom" customColorSpace="genericGamma22GrayColorSpace"/>
66
+ <color key="sectionIndexTrackingBackgroundColor" white="0.0" alpha="0.0" colorSpace="custom" customColorSpace="genericGamma22GrayColorSpace"/>
67
+ <prototypes>
68
+ <tableViewCell clipsSubviews="YES" contentMode="scaleToFill" preservesSuperviewLayoutMargins="YES" selectionStyle="default" indentationWidth="10" reuseIdentifier="InfoCell" id="THr-Uf-ggb" customClass="InfoCell" customModule="Midas" customModuleProvider="target">
69
+ <rect key="frame" x="0.0" y="28" width="294" height="44"/>
70
+ <autoresizingMask key="autoresizingMask"/>
71
+ <tableViewCellContentView key="contentView" opaque="NO" clipsSubviews="YES" multipleTouchEnabled="YES" contentMode="center" preservesSuperviewLayoutMargins="YES" insetsLayoutMarginsFromSafeArea="NO" tableViewCell="THr-Uf-ggb" id="5T1-ZQ-m0o">
72
+ <rect key="frame" x="0.0" y="0.0" width="294" height="44"/>
73
+ <autoresizingMask key="autoresizingMask"/>
74
+ <subviews>
75
+ <label opaque="NO" userInteractionEnabled="NO" contentMode="left" horizontalHuggingPriority="251" verticalHuggingPriority="251" text="Field Label" lineBreakMode="clip" baselineAdjustment="alignBaselines" adjustsFontSizeToFit="NO" translatesAutoresizingMaskIntoConstraints="NO" id="mTG-UQ-Lwc" userLabel="Field Name Label">
76
+ <rect key="frame" x="0.0" y="0.0" width="176.5" height="19.5"/>
77
+ <color key="backgroundColor" white="0.0" alpha="0.0" colorSpace="custom" customColorSpace="genericGamma22GrayColorSpace"/>
78
+ <color key="tintColor" white="1" alpha="1" colorSpace="custom" customColorSpace="genericGamma22GrayColorSpace"/>
79
+ <fontDescription key="fontDescription" type="system" weight="medium" pointSize="16"/>
80
+ <color key="textColor" white="1" alpha="1" colorSpace="custom" customColorSpace="genericGamma22GrayColorSpace"/>
81
+ <color key="highlightedColor" white="1" alpha="1" colorSpace="custom" customColorSpace="genericGamma22GrayColorSpace"/>
82
+ </label>
83
+ <label opaque="NO" userInteractionEnabled="NO" contentMode="left" horizontalHuggingPriority="100" verticalHuggingPriority="251" text="Info Label" textAlignment="right" lineBreakMode="tailTruncation" baselineAdjustment="alignBaselines" adjustsFontSizeToFit="NO" translatesAutoresizingMaskIntoConstraints="NO" id="3kP-Ho-Tm4" userLabel="Info Label">
84
+ <rect key="frame" x="176.5" y="0.0" width="117.5" height="19.5"/>
85
+ <color key="backgroundColor" white="0.0" alpha="0.0" colorSpace="custom" customColorSpace="genericGamma22GrayColorSpace"/>
86
+ <color key="tintColor" white="1" alpha="1" colorSpace="custom" customColorSpace="genericGamma22GrayColorSpace"/>
87
+ <fontDescription key="fontDescription" type="system" pointSize="16"/>
88
+ <color key="textColor" white="1" alpha="1" colorSpace="custom" customColorSpace="genericGamma22GrayColorSpace"/>
89
+ <color key="highlightedColor" white="1" alpha="1" colorSpace="custom" customColorSpace="genericGamma22GrayColorSpace"/>
90
+ </label>
91
+ </subviews>
92
+ <color key="backgroundColor" white="0.0" alpha="0.0" colorSpace="custom" customColorSpace="genericGamma22GrayColorSpace"/>
93
+ <color key="tintColor" white="1" alpha="1" colorSpace="custom" customColorSpace="genericGamma22GrayColorSpace"/>
94
+ <constraints>
95
+ <constraint firstItem="mTG-UQ-Lwc" firstAttribute="top" secondItem="5T1-ZQ-m0o" secondAttribute="top" id="4yW-Lh-MPd"/>
96
+ <constraint firstItem="3kP-Ho-Tm4" firstAttribute="top" secondItem="5T1-ZQ-m0o" secondAttribute="top" id="JOU-KJ-V73"/>
97
+ <constraint firstItem="3kP-Ho-Tm4" firstAttribute="width" secondItem="5T1-ZQ-m0o" secondAttribute="width" multiplier="0.4" id="NXu-3C-w9k"/>
98
+ <constraint firstItem="mTG-UQ-Lwc" firstAttribute="width" secondItem="5T1-ZQ-m0o" secondAttribute="width" multiplier="0.6" id="Vvh-cz-q4K"/>
99
+ <constraint firstItem="3kP-Ho-Tm4" firstAttribute="leading" secondItem="mTG-UQ-Lwc" secondAttribute="trailing" id="kwA-sz-1zt"/>
100
+ <constraint firstItem="mTG-UQ-Lwc" firstAttribute="leading" secondItem="5T1-ZQ-m0o" secondAttribute="leading" id="kxa-T6-w0h"/>
101
+ </constraints>
102
+ </tableViewCellContentView>
103
+ <color key="backgroundColor" white="0.0" alpha="0.0" colorSpace="custom" customColorSpace="genericGamma22GrayColorSpace"/>
104
+ <constraints>
105
+ <constraint firstItem="3kP-Ho-Tm4" firstAttribute="top" secondItem="THr-Uf-ggb" secondAttribute="top" id="XTX-Dy-sDb"/>
106
+ <constraint firstItem="3kP-Ho-Tm4" firstAttribute="trailing" secondItem="THr-Uf-ggb" secondAttribute="trailing" id="kX7-5L-aoX"/>
107
+ </constraints>
108
+ <connections>
109
+ <outlet property="fieldNameLabel" destination="mTG-UQ-Lwc" id="7HD-r4-86n"/>
110
+ <outlet property="infoLabel" destination="3kP-Ho-Tm4" id="nMw-We-0cp"/>
111
+ </connections>
112
+ </tableViewCell>
113
+ </prototypes>
114
+ <connections>
115
+ <outlet property="dataSource" destination="BYZ-38-t0r" id="8Ah-El-SjN"/>
116
+ <outlet property="delegate" destination="BYZ-38-t0r" id="bw0-yQ-3jW"/>
117
+ </connections>
118
+ </tableView>
119
+ <stackView opaque="NO" contentMode="scaleToFill" spacing="15" translatesAutoresizingMaskIntoConstraints="NO" id="cOj-2t-SbP" userLabel="Thread Stack View">
120
+ <rect key="frame" x="35" y="203" width="344" height="30"/>
121
+ <subviews>
122
+ <label opaque="NO" userInteractionEnabled="NO" contentMode="left" verticalHuggingPriority="251" text="Threads" textAlignment="natural" lineBreakMode="tailTruncation" baselineAdjustment="alignBaselines" adjustsFontSizeToFit="NO" translatesAutoresizingMaskIntoConstraints="NO" id="eX7-9U-WcH">
123
+ <rect key="frame" x="0.0" y="0.0" width="209" height="30"/>
124
+ <fontDescription key="fontDescription" type="boldSystem" pointSize="16"/>
125
+ <color key="textColor" white="1" alpha="1" colorSpace="custom" customColorSpace="genericGamma22GrayColorSpace"/>
126
+ <nil key="highlightedColor"/>
127
+ </label>
128
+ <label opaque="NO" userInteractionEnabled="NO" contentMode="left" horizontalHuggingPriority="251" verticalHuggingPriority="251" text="0" textAlignment="natural" lineBreakMode="tailTruncation" baselineAdjustment="alignBaselines" adjustsFontSizeToFit="NO" translatesAutoresizingMaskIntoConstraints="NO" id="bIb-UK-3tz">
129
+ <rect key="frame" x="224" y="0.0" width="11" height="30"/>
130
+ <fontDescription key="fontDescription" type="boldSystem" pointSize="16"/>
131
+ <color key="textColor" white="1" alpha="1" colorSpace="custom" customColorSpace="genericGamma22GrayColorSpace"/>
132
+ <color key="highlightedColor" white="1" alpha="1" colorSpace="custom" customColorSpace="genericGamma22GrayColorSpace"/>
133
+ </label>
134
+ <stepper opaque="NO" contentMode="scaleToFill" horizontalHuggingPriority="750" verticalHuggingPriority="750" contentHorizontalAlignment="center" contentVerticalAlignment="center" value="1" minimumValue="1" maximumValue="10" translatesAutoresizingMaskIntoConstraints="NO" id="P6P-6m-RWk">
135
+ <rect key="frame" x="250" y="0.0" width="94" height="30"/>
136
+ <color key="tintColor" white="1" alpha="1" colorSpace="custom" customColorSpace="genericGamma22GrayColorSpace"/>
137
+ <connections>
138
+ <action selector="didChangeThreadCount:" destination="BYZ-38-t0r" eventType="valueChanged" id="Ty8-5M-GAN"/>
139
+ </connections>
140
+ </stepper>
141
+ </subviews>
142
+ <constraints>
143
+ <constraint firstAttribute="height" constant="30" id="PmW-q1-hKU"/>
144
+ </constraints>
145
+ </stackView>
146
+ <stackView opaque="NO" contentMode="scaleToFill" spacing="10" translatesAutoresizingMaskIntoConstraints="NO" id="RXH-7X-MUd" userLabel="Delegates Stack View">
147
+ <rect key="frame" x="35" y="253" width="344" height="31"/>
148
+ <subviews>
149
+ <label opaque="NO" userInteractionEnabled="NO" contentMode="left" horizontalHuggingPriority="251" verticalHuggingPriority="251" text="Delegates" textAlignment="natural" lineBreakMode="tailTruncation" baselineAdjustment="alignBaselines" adjustsFontSizeToFit="NO" translatesAutoresizingMaskIntoConstraints="NO" id="gmc-69-mzQ" userLabel="Delegates label">
150
+ <rect key="frame" x="0.0" y="0.0" width="78.5" height="31"/>
151
+ <fontDescription key="fontDescription" type="boldSystem" pointSize="16"/>
152
+ <color key="textColor" white="1" alpha="1" colorSpace="custom" customColorSpace="genericGamma22GrayColorSpace"/>
153
+ <nil key="highlightedColor"/>
154
+ </label>
155
+ <segmentedControl opaque="NO" contentMode="scaleToFill" contentHorizontalAlignment="left" contentVerticalAlignment="top" segmentControlStyle="plain" selectedSegmentIndex="0" translatesAutoresizingMaskIntoConstraints="NO" id="kS9-M8-aNu">
156
+ <rect key="frame" x="88.5" y="0.0" width="255.5" height="32"/>
157
+ <segments>
158
+ <segment title="First"/>
159
+ <segment title="Second"/>
160
+ </segments>
161
+ <connections>
162
+ <action selector="didChangeDelegate:" destination="BYZ-38-t0r" eventType="valueChanged" id="mGg-gI-RL2"/>
163
+ </connections>
164
+ </segmentedControl>
165
+ </subviews>
166
+ </stackView>
167
+ </subviews>
168
+ <color key="backgroundColor" red="0.0" green="0.0" blue="0.0" alpha="0.80000000000000004" colorSpace="custom" customColorSpace="sRGB"/>
169
+ <constraints>
170
+ <constraint firstItem="RXH-7X-MUd" firstAttribute="leading" secondItem="cOj-2t-SbP" secondAttribute="leading" id="3wP-lN-o3t"/>
171
+ <constraint firstItem="RXH-7X-MUd" firstAttribute="trailing" secondItem="cOj-2t-SbP" secondAttribute="trailing" id="Ej9-gR-bWk"/>
172
+ <constraint firstItem="cOj-2t-SbP" firstAttribute="width" secondItem="nFg-A5-Dew" secondAttribute="width" constant="-70" id="VyB-yT-Tey"/>
173
+ <constraint firstItem="3e4-S8-Ouh" firstAttribute="top" secondItem="nFg-A5-Dew" secondAttribute="top" constant="30" id="XBT-uR-wI0"/>
174
+ <constraint firstAttribute="bottom" relation="greaterThanOrEqual" secondItem="cOj-2t-SbP" secondAttribute="bottom" priority="250" constant="30" id="YjT-kc-3Yi"/>
175
+ <constraint firstItem="3e4-S8-Ouh" firstAttribute="bottom" secondItem="cOj-2t-SbP" secondAttribute="top" constant="-10" id="lEI-3O-3zh"/>
176
+ <constraint firstItem="cOj-2t-SbP" firstAttribute="bottom" secondItem="RXH-7X-MUd" secondAttribute="top" constant="-20" id="npw-gf-o7m"/>
177
+ <constraint firstItem="3e4-S8-Ouh" firstAttribute="width" secondItem="nFg-A5-Dew" secondAttribute="width" constant="-120" id="orY-Ah-Jzt"/>
178
+ <constraint firstItem="3e4-S8-Ouh" firstAttribute="centerX" secondItem="nFg-A5-Dew" secondAttribute="centerX" id="tIF-GC-QXD"/>
179
+ <constraint firstItem="cOj-2t-SbP" firstAttribute="centerX" secondItem="nFg-A5-Dew" secondAttribute="centerX" id="xwh-fU-JP1"/>
180
+ </constraints>
181
+ </view>
182
+ </subviews>
183
+ <color key="backgroundColor" white="1" alpha="1" colorSpace="custom" customColorSpace="genericGamma22GrayColorSpace"/>
184
+ <constraints>
185
+ <constraint firstItem="JeV-bW-Ogb" firstAttribute="top" secondItem="xja-Am-Oc5" secondAttribute="bottom" constant="35" id="19f-cQ-jfR"/>
186
+ <constraint firstItem="tWv-pt-h17" firstAttribute="leading" secondItem="aEU-Ov-crs" secondAttribute="leading" id="1Jc-ch-65t"/>
187
+ <constraint firstItem="FLM-9o-o4W" firstAttribute="leading" secondItem="aEU-Ov-crs" secondAttribute="leading" id="23L-Hb-zus"/>
188
+ <constraint firstItem="nFg-A5-Dew" firstAttribute="leading" secondItem="aEU-Ov-crs" secondAttribute="leading" id="2aq-VQ-8jy"/>
189
+ <constraint firstItem="tWv-pt-h17" firstAttribute="top" secondItem="aEU-Ov-crs" secondAttribute="top" id="7aI-Bb-gUy"/>
190
+ <constraint firstItem="FLM-9o-o4W" firstAttribute="width" secondItem="aEU-Ov-crs" secondAttribute="width" priority="750" id="94P-51-fMI"/>
191
+ <constraint firstItem="FLM-9o-o4W" firstAttribute="height" relation="lessThanOrEqual" secondItem="aEU-Ov-crs" secondAttribute="height" id="W3V-ka-9Fq"/>
192
+ <constraint firstItem="FLM-9o-o4W" firstAttribute="height" secondItem="aEU-Ov-crs" secondAttribute="height" priority="750" id="Ycv-Lj-8Xu"/>
193
+ <constraint firstItem="tWv-pt-h17" firstAttribute="width" secondItem="aEU-Ov-crs" secondAttribute="width" id="Yni-oR-3Bk"/>
194
+ <constraint firstItem="JeV-bW-Ogb" firstAttribute="centerX" secondItem="aEU-Ov-crs" secondAttribute="centerX" id="biE-2Z-tkx"/>
195
+ <constraint firstItem="JeV-bW-Ogb" firstAttribute="centerY" secondItem="aEU-Ov-crs" secondAttribute="centerY" id="c8W-L8-dcv"/>
196
+ <constraint firstAttribute="bottom" secondItem="nFg-A5-Dew" secondAttribute="bottom" id="dxP-KD-5bd"/>
197
+ <constraint firstItem="xja-Am-Oc5" firstAttribute="centerX" secondItem="aEU-Ov-crs" secondAttribute="centerX" id="ehn-uM-mdg"/>
198
+ <constraint firstItem="nFg-A5-Dew" firstAttribute="width" secondItem="aEU-Ov-crs" secondAttribute="width" id="peY-3I-8cV"/>
199
+ <constraint firstItem="nFg-A5-Dew" firstAttribute="top" secondItem="FLM-9o-o4W" secondAttribute="bottom" id="sFi-YM-EYA"/>
200
+ <constraint firstItem="FLM-9o-o4W" firstAttribute="top" secondItem="tWv-pt-h17" secondAttribute="bottom" id="trj-PE-5oc"/>
201
+ <constraint firstItem="FLM-9o-o4W" firstAttribute="width" relation="lessThanOrEqual" secondItem="aEU-Ov-crs" secondAttribute="width" id="zae-wS-Evw"/>
202
+ </constraints>
203
+ </view>
204
+ </subviews>
205
+ <color key="backgroundColor" red="1" green="1" blue="1" alpha="1" colorSpace="custom" customColorSpace="sRGB"/>
206
+ <constraints>
207
+ <constraint firstItem="aEU-Ov-crs" firstAttribute="bottom" secondItem="8bC-Xf-vdC" secondAttribute="bottom" id="3OD-6W-uRh"/>
208
+ <constraint firstItem="RXH-7X-MUd" firstAttribute="centerX" secondItem="6Tk-OE-BBY" secondAttribute="centerX" id="Hiz-DI-CwX"/>
209
+ <constraint firstItem="6Tk-OE-BBY" firstAttribute="bottom" secondItem="RXH-7X-MUd" secondAttribute="bottom" constant="45" id="OGm-6n-JMM"/>
210
+ <constraint firstItem="aEU-Ov-crs" firstAttribute="leading" secondItem="8bC-Xf-vdC" secondAttribute="leading" id="e9c-LL-cJf"/>
211
+ <constraint firstItem="aEU-Ov-crs" firstAttribute="top" secondItem="8bC-Xf-vdC" secondAttribute="top" id="eyF-LV-5Jb"/>
212
+ <constraint firstItem="aEU-Ov-crs" firstAttribute="trailing" secondItem="8bC-Xf-vdC" secondAttribute="trailing" id="g7I-ct-vQc"/>
213
+ <constraint firstItem="tWv-pt-h17" firstAttribute="bottom" secondItem="6Tk-OE-BBY" secondAttribute="top" constant="75" id="oFt-dT-p0b"/>
214
+ </constraints>
215
+ <viewLayoutGuide key="safeArea" id="6Tk-OE-BBY"/>
216
+ </view>
217
+ <connections>
218
+ <outlet property="cameraUnavailableLabel" destination="xja-Am-Oc5" id="g9Q-Mh-1ct"/>
219
+ <outlet property="delegatesControl" destination="kS9-M8-aNu" id="aq5-y6-e3w"/>
220
+ <outlet property="overlayView" destination="FLM-9o-o4W" id="06u-Ci-QDR"/>
221
+ <outlet property="previewView" destination="aEU-Ov-crs" id="NMN-in-8FS"/>
222
+ <outlet property="resumeButton" destination="JeV-bW-Ogb" id="Y7c-x9-3t3"/>
223
+ <outlet property="tableView" destination="3e4-S8-Ouh" id="bhz-PY-Fhd"/>
224
+ <outlet property="threadCountLabel" destination="bIb-UK-3tz" id="LvS-wv-2Pq"/>
225
+ <outlet property="threadCountStepper" destination="P6P-6m-RWk" id="zPW-0r-KYP"/>
226
+ </connections>
227
+ </viewController>
228
+ <placeholder placeholderIdentifier="IBFirstResponder" id="dkx-z0-nzr" sceneMemberID="firstResponder"/>
229
+ </objects>
230
+ <point key="canvasLocation" x="137.68115942028987" y="137.94642857142856"/>
231
+ </scene>
232
+ </scenes>
233
+ <resources>
234
+ <image name="tfl_logo.png" width="294" height="47"/>
235
+ </resources>
236
+ </document>
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/ios/Midas/ViewControllers/ViewController.swift ADDED
@@ -0,0 +1,489 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2
+ //
3
+ // Licensed under the Apache License, Version 2.0 (the "License");
4
+ // you may not use this file except in compliance with the License.
5
+ // You may obtain a copy of the License at
6
+ //
7
+ // http://www.apache.org/licenses/LICENSE-2.0
8
+ //
9
+ // Unless required by applicable law or agreed to in writing, software
10
+ // distributed under the License is distributed on an "AS IS" BASIS,
11
+ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ // See the License for the specific language governing permissions and
13
+ // limitations under the License.
14
+
15
+ import AVFoundation
16
+ import UIKit
17
+ import os
18
+
19
+
20
+ public struct PixelData {
21
+ var a: UInt8
22
+ var r: UInt8
23
+ var g: UInt8
24
+ var b: UInt8
25
+ }
26
+
27
+ extension UIImage {
28
+ convenience init?(pixels: [PixelData], width: Int, height: Int) {
29
+ guard width > 0 && height > 0, pixels.count == width * height else { return nil }
30
+ var data = pixels
31
+ guard let providerRef = CGDataProvider(data: Data(bytes: &data, count: data.count * MemoryLayout<PixelData>.size) as CFData)
32
+ else { return nil }
33
+ guard let cgim = CGImage(
34
+ width: width,
35
+ height: height,
36
+ bitsPerComponent: 8,
37
+ bitsPerPixel: 32,
38
+ bytesPerRow: width * MemoryLayout<PixelData>.size,
39
+ space: CGColorSpaceCreateDeviceRGB(),
40
+ bitmapInfo: CGBitmapInfo(rawValue: CGImageAlphaInfo.premultipliedFirst.rawValue),
41
+ provider: providerRef,
42
+ decode: nil,
43
+ shouldInterpolate: false,
44
+ intent: .defaultIntent)
45
+ else { return nil }
46
+ self.init(cgImage: cgim)
47
+ }
48
+ }
49
+
50
+
51
+ class ViewController: UIViewController {
52
+ // MARK: Storyboards Connections
53
+ @IBOutlet weak var previewView: PreviewView!
54
+
55
+ //@IBOutlet weak var overlayView: OverlayView!
56
+ @IBOutlet weak var overlayView: UIImageView!
57
+
58
+ private var imageView : UIImageView = UIImageView(frame:CGRect(x:0, y:0, width:400, height:400))
59
+
60
+ private var imageViewInitialized: Bool = false
61
+
62
+ @IBOutlet weak var resumeButton: UIButton!
63
+ @IBOutlet weak var cameraUnavailableLabel: UILabel!
64
+
65
+ @IBOutlet weak var tableView: UITableView!
66
+
67
+ @IBOutlet weak var threadCountLabel: UILabel!
68
+ @IBOutlet weak var threadCountStepper: UIStepper!
69
+
70
+ @IBOutlet weak var delegatesControl: UISegmentedControl!
71
+
72
+ // MARK: ModelDataHandler traits
73
+ var threadCount: Int = Constants.defaultThreadCount
74
+ var delegate: Delegates = Constants.defaultDelegate
75
+
76
+ // MARK: Result Variables
77
+ // Inferenced data to render.
78
+ private var inferencedData: InferencedData?
79
+
80
+ // Minimum score to render the result.
81
+ private let minimumScore: Float = 0.5
82
+
83
+ private var avg_latency: Double = 0.0
84
+
85
+ // Relative location of `overlayView` to `previewView`.
86
+ private var overlayViewFrame: CGRect?
87
+
88
+ private var previewViewFrame: CGRect?
89
+
90
+ // MARK: Controllers that manage functionality
91
+ // Handles all the camera related functionality
92
+ private lazy var cameraCapture = CameraFeedManager(previewView: previewView)
93
+
94
+ // Handles all data preprocessing and makes calls to run inference.
95
+ private var modelDataHandler: ModelDataHandler?
96
+
97
+ // MARK: View Handling Methods
98
+ override func viewDidLoad() {
99
+ super.viewDidLoad()
100
+
101
+ do {
102
+ modelDataHandler = try ModelDataHandler()
103
+ } catch let error {
104
+ fatalError(error.localizedDescription)
105
+ }
106
+
107
+ cameraCapture.delegate = self
108
+ tableView.delegate = self
109
+ tableView.dataSource = self
110
+
111
+ // MARK: UI Initialization
112
+ // Setup thread count stepper with white color.
113
+ // https://forums.developer.apple.com/thread/121495
114
+ threadCountStepper.setDecrementImage(
115
+ threadCountStepper.decrementImage(for: .normal), for: .normal)
116
+ threadCountStepper.setIncrementImage(
117
+ threadCountStepper.incrementImage(for: .normal), for: .normal)
118
+ // Setup initial stepper value and its label.
119
+ threadCountStepper.value = Double(Constants.defaultThreadCount)
120
+ threadCountLabel.text = Constants.defaultThreadCount.description
121
+
122
+ // Setup segmented controller's color.
123
+ delegatesControl.setTitleTextAttributes(
124
+ [NSAttributedString.Key.foregroundColor: UIColor.lightGray],
125
+ for: .normal)
126
+ delegatesControl.setTitleTextAttributes(
127
+ [NSAttributedString.Key.foregroundColor: UIColor.black],
128
+ for: .selected)
129
+ // Remove existing segments to initialize it with `Delegates` entries.
130
+ delegatesControl.removeAllSegments()
131
+ Delegates.allCases.forEach { delegate in
132
+ delegatesControl.insertSegment(
133
+ withTitle: delegate.description,
134
+ at: delegate.rawValue,
135
+ animated: false)
136
+ }
137
+ delegatesControl.selectedSegmentIndex = 0
138
+ }
139
+
140
+ override func viewWillAppear(_ animated: Bool) {
141
+ super.viewWillAppear(animated)
142
+
143
+ cameraCapture.checkCameraConfigurationAndStartSession()
144
+ }
145
+
146
+ override func viewWillDisappear(_ animated: Bool) {
147
+ cameraCapture.stopSession()
148
+ }
149
+
150
/// Caches the laid-out frames; `runModel(on:)` reads these later
/// (potentially from the camera callback) to map view space to buffer space.
override func viewDidLayoutSubviews() {
  // BUG FIX: the original omitted the required call to super.
  super.viewDidLayoutSubviews()

  overlayViewFrame = overlayView.frame
  previewViewFrame = previewView.frame
}
154
+
155
+ // MARK: Button Actions
156
/// Stepper callback: rebuilds the interpreter with the new thread count.
@IBAction func didChangeThreadCount(_ sender: UIStepper) {
  let changedCount = Int(sender.value)
  // Ignore events that do not actually change the displayed value.
  if threadCountLabel.text == changedCount.description {
    return
  }

  // Recreate the handler first; only on success are the stored count and
  // label updated. A failed interpreter rebuild is fatal.
  do {
    modelDataHandler = try ModelDataHandler(threadCount: changedCount, delegate: delegate)
  } catch let error {
    fatalError(error.localizedDescription)
  }
  threadCount = changedCount
  threadCountLabel.text = changedCount.description
  os_log("Thread count is changed to: %d", threadCount)
}
171
+
172
/// Segmented-control callback: rebuilds the interpreter with the newly
/// selected TFLite delegate.
@IBAction func didChangeDelegate(_ sender: UISegmentedControl) {
  // BUG FIX: corrected "segemented" -> "segmented" in the crash message.
  guard let changedDelegate = Delegates(rawValue: delegatesControl.selectedSegmentIndex) else {
    fatalError("Unexpected value from delegates segmented controller.")
  }
  // Recreate the handler for the chosen delegate; only on success is the
  // stored delegate updated. A failed interpreter rebuild is fatal.
  do {
    modelDataHandler = try ModelDataHandler(threadCount: threadCount, delegate: changedDelegate)
  } catch let error {
    fatalError(error.localizedDescription)
  }
  delegate = changedDelegate
  os_log("Delegate is changed to: %s", delegate.description)
}
184
+
185
/// Attempts to resume an interrupted capture session; on success hides the
/// warning UI, otherwise informs the user via an alert.
@IBAction func didTapResumeButton(_ sender: Any) {
  cameraCapture.resumeInterruptedSession { didResume in
    guard didResume else {
      self.presentUnableToResumeSessionAlert()
      return
    }
    self.resumeButton.isHidden = true
    self.cameraUnavailableLabel.isHidden = true
  }
}
196
+
197
/// Shows a simple OK alert telling the user the session could not be resumed.
func presentUnableToResumeSessionAlert() {
  let alertController = UIAlertController(
    title: "Unable to Resume Session",
    message: "There was an error while attempting to resume session.",
    preferredStyle: .alert)
  let okAction = UIAlertAction(title: "OK", style: .default, handler: nil)
  alertController.addAction(okAction)

  self.present(alertController, animated: true)
}
207
+ }
208
+
209
// MARK: - CameraFeedManagerDelegate Methods
extension ViewController: CameraFeedManagerDelegate {
  /// New camera frame: run depth inference on it.
  func cameraFeedManager(_ manager: CameraFeedManager, didOutput pixelBuffer: CVPixelBuffer) {
    runModel(on: pixelBuffer)
  }

  // MARK: Session Handling Alerts
  func cameraFeedManagerDidEncounterSessionRunTimeError(_ manager: CameraFeedManager) {
    // Handles session run time error by updating the UI and providing a button if session can be
    // manually resumed.
    self.resumeButton.isHidden = false
  }

  func cameraFeedManager(
    _ manager: CameraFeedManager, sessionWasInterrupted canResumeManually: Bool
  ) {
    // Updates the UI when the session is interrupted.
    if canResumeManually {
      self.resumeButton.isHidden = false
    } else {
      self.cameraUnavailableLabel.isHidden = false
    }
  }

  func cameraFeedManagerDidEndSessionInterruption(_ manager: CameraFeedManager) {
    // Updates UI once session interruption has ended.
    self.cameraUnavailableLabel.isHidden = true
    self.resumeButton.isHidden = true
  }

  func presentVideoConfigurationErrorAlert(_ manager: CameraFeedManager) {
    // BUG FIX: corrected user-visible typo "Confirguration Failed".
    let alertController = UIAlertController(
      title: "Configuration Failed", message: "Configuration of camera has failed.",
      preferredStyle: .alert)
    let okAction = UIAlertAction(title: "OK", style: .cancel, handler: nil)
    alertController.addAction(okAction)

    present(alertController, animated: true, completion: nil)
  }

  func presentCameraPermissionsDeniedAlert(_ manager: CameraFeedManager) {
    let alertController = UIAlertController(
      title: "Camera Permissions Denied",
      message:
        "Camera permissions have been denied for this app. You can change this by going to Settings",
      preferredStyle: .alert)

    let cancelAction = UIAlertAction(title: "Cancel", style: .cancel, handler: nil)
    let settingsAction = UIAlertAction(title: "Settings", style: .default) { action in
      if let url = URL.init(string: UIApplication.openSettingsURLString) {
        UIApplication.shared.open(url, options: [:], completionHandler: nil)
      }
    }

    alertController.addAction(cancelAction)
    alertController.addAction(settingsAction)

    present(alertController, animated: true, completion: nil)
  }

  /// Runs the MiDaS model on a camera frame and renders the depth map.
  @objc func runModel(on pixelBuffer: CVPixelBuffer) {
    // Frames are cached in viewDidLayoutSubviews; skip until layout happened.
    guard let overlayViewFrame = overlayViewFrame, let previewViewFrame = previewViewFrame
    else {
      return
    }
    // To put `overlayView` area as model input, transform `overlayViewFrame` following transform
    // from `previewView` to `pixelBuffer`. `previewView` area is transformed to fit in
    // `pixelBuffer`, because `pixelBuffer` as a camera output is resized to fill `previewView`.
    // https://developer.apple.com/documentation/avfoundation/avlayervideogravity/1385607-resizeaspectfill
    let modelInputRange = overlayViewFrame.applying(
      previewViewFrame.size.transformKeepAspect(toFitIn: pixelBuffer.size))

    // Run Midas model.
    guard
      let (result, width, height, times) = self.modelDataHandler?.runMidas(
        on: pixelBuffer,
        from: modelInputRange,
        to: overlayViewFrame.size)
    else {
      os_log("Cannot get inference result.", type: .error)
      return
    }

    // Exponential moving average of the inference latency (alpha = 0.1).
    if avg_latency == 0 {
      avg_latency = times.inference
    } else {
      avg_latency = times.inference * 0.1 + avg_latency * 0.9
    }

    // Update `inferencedData` to render data in `tableView`.
    inferencedData = InferencedData(score: Float(avg_latency), times: times)

    // Linearly map the raw depth range onto 0...255 grey values.
    // (Removed the unused `outputs_size` local from the original.)
    let outputs = result
    var multiplier: Float = 1.0
    let max_val: Float = outputs.max() ?? 0
    let min_val: Float = outputs.min() ?? 0
    if (max_val - min_val) > 0 {
      multiplier = 255 / (max_val - min_val)
    }

    // Draw result.
    DispatchQueue.main.async {
      self.tableView.reloadData()

      var pixels: [PixelData] = .init(
        repeating: .init(a: 255, r: 0, g: 0, b: 0), count: width * height)

      for i in pixels.indices {
        let val = UInt8((outputs[i] - min_val) * multiplier)
        pixels[i].r = val
        pixels[i].g = val
        pixels[i].b = val
      }

      // NOTE(review): nested re-dispatch kept from the original — it defers
      // the image update by one more run-loop turn; flattening it would
      // change timing, so it is preserved.
      DispatchQueue.main.async {
        let image = UIImage(pixels: pixels, width: width, height: height)

        self.imageView.image = image

        // Attach the image view to the overlay only once.
        if (self.imageViewInitialized == false) {
          self.imageViewInitialized = true
          self.overlayView.addSubview(self.imageView)
          self.overlayView.setNeedsDisplay()
        }
      }
    }
  }
}
379
+
380
+
381
// MARK: - TableViewDelegate, TableViewDataSource Methods
extension ViewController: UITableViewDelegate, UITableViewDataSource {
  /// One table section per `InferenceSections` case.
  func numberOfSections(in tableView: UITableView) -> Int {
    return InferenceSections.allCases.count
  }

  func tableView(_ tableView: UITableView, numberOfRowsInSection section: Int) -> Int {
    guard let inferenceSection = InferenceSections(rawValue: section) else { return 0 }
    return inferenceSection.subcaseCount
  }

  func tableView(_ tableView: UITableView, cellForRowAt indexPath: IndexPath) -> UITableViewCell {
    let cell = tableView.dequeueReusableCell(withIdentifier: "InfoCell") as! InfoCell
    guard let section = InferenceSections(rawValue: indexPath.section),
      let data = inferencedData
    else {
      return cell
    }

    let fieldName: String
    let info: String

    switch section {
    case .Score:
      fieldName = section.description
      info = String(format: "%.3f", data.score)
    case .Time:
      guard let row = ProcessingTimes(rawValue: indexPath.row) else { return cell }
      let time: Double
      switch row {
      case .InferenceTime:
        time = data.times.inference
      }
      fieldName = row.description
      info = String(format: "%.2fms", time)
    }

    cell.fieldNameLabel.text = fieldName
    cell.infoLabel.text = info
    return cell
  }

  func tableView(_ tableView: UITableView, heightForRowAt indexPath: IndexPath) -> CGFloat {
    guard let section = InferenceSections(rawValue: indexPath.section) else { return 0 }
    // The last row of each section is taller to act as a visual separator.
    let isLastRow = indexPath.row == section.subcaseCount - 1
    return isLastRow
      ? Traits.separatorCellHeight + Traits.bottomSpacing
      : Traits.normalCellHeight
  }
}
441
+
442
+ // MARK: - Private enums
443
/// UI constraint values
fileprivate enum Traits {
  // Row heights (points) for the info table view; the separator height plus
  // bottom spacing is applied to the last row of each section.
  static let normalCellHeight: CGFloat = 35.0
  static let separatorCellHeight: CGFloat = 25.0
  static let bottomSpacing: CGFloat = 30.0
}
449
+
450
/// Latest inference result shown by the info table view.
fileprivate struct InferencedData {
  // Smoothed average inference latency (set from avg_latency in runModel;
  // presumably milliseconds — matches the "%.2fms" time formatting).
  var score: Float
  // Timing breakdown of the most recent inference.
  var times: Times
}
454
+
455
/// Type of sections in Info Cell
fileprivate enum InferenceSections: Int, CaseIterable {
  case Score
  case Time

  /// Section header text shown in the table.
  var description: String {
    switch self {
    case .Score: return "Average"
    case .Time: return "Processing Time"
    }
  }

  /// Number of rows rendered for this section.
  var subcaseCount: Int {
    switch self {
    case .Score: return 1
    case .Time: return ProcessingTimes.allCases.count
    }
  }
}
478
+
479
/// Type of processing times in Time section in Info Cell
fileprivate enum ProcessingTimes: Int, CaseIterable {
  case InferenceTime

  /// Human-readable row label.
  var description: String {
    // Single timing row today; extend this switch when more are added.
    switch self {
    case .InferenceTime: return "Inference Time"
    }
  }
}
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/ios/Midas/Views/OverlayView.swift ADDED
@@ -0,0 +1,63 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ // Copyright 2019 The TensorFlow Authors. All Rights Reserved.
2
+ //
3
+ // Licensed under the Apache License, Version 2.0 (the "License");
4
+ // you may not use this file except in compliance with the License.
5
+ // You may obtain a copy of the License at
6
+ //
7
+ // http://www.apache.org/licenses/LICENSE-2.0
8
+ //
9
+ // Unless required by applicable law or agreed to in writing, software
10
+ // distributed under the License is distributed on an "AS IS" BASIS,
11
+ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ // See the License for the specific language governing permissions and
13
+ // limitations under the License.
14
+
15
+ import UIKit
16
+
17
/// UIView for rendering inference output.
class OverlayView: UIView {

  // Geometry queued for the next draw pass.
  var dots = [CGPoint]()
  var lines = [Line]()

  override func draw(_ rect: CGRect) {
    dots.forEach(drawDot(of:))
    lines.forEach(drawLine(of:))
  }

  /// Fills a small circle centered on `dot`.
  func drawDot(of dot: CGPoint) {
    let radius = Traits.dot.radius
    let dotBounds = CGRect(
      x: dot.x - radius / 2, y: dot.y - radius / 2,
      width: radius, height: radius)
    let path = UIBezierPath(ovalIn: dotBounds)

    Traits.dot.color.setFill()
    path.fill()
  }

  /// Strokes a straight segment from `line.from` to `line.to`.
  func drawLine(of line: Line) {
    let path = UIBezierPath()
    path.move(to: CGPoint(x: line.from.x, y: line.from.y))
    path.addLine(to: CGPoint(x: line.to.x, y: line.to.y))
    path.close()

    path.lineWidth = Traits.line.width
    Traits.line.color.setStroke()
    path.stroke()
  }

  /// Drops all queued geometry; caller triggers the redraw.
  func clear() {
    self.dots = []
    self.lines = []
  }
}
59
+
60
/// Drawing constants for the dots and lines rendered by `OverlayView`.
private enum Traits {
  static let dot = (radius: CGFloat(5), color: UIColor.orange)
  static let line = (width: CGFloat(1.0), color: UIColor.orange)
}
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/ios/README.md ADDED
@@ -0,0 +1,105 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Tensorflow Lite MiDaS iOS Example
2
+
3
+ ### Requirements
4
+
5
+ - XCode 11.0 or above
6
+ - iOS 12.0 or above, [iOS 14 breaks the NPU Delegate](https://github.com/tensorflow/tensorflow/issues/43339)
7
+ - TensorFlow 2.4.0, TensorFlowLiteSwift -> 0.0.1-nightly
8
+
9
+ ## Quick Start with a MiDaS Example
10
+
11
+ MiDaS is a neural network to compute depth from a single image. It uses TensorFlowLiteSwift / C++ libraries on iOS. The code is written in Swift.
12
+
13
+ Paper: https://arxiv.org/abs/1907.01341
14
+
15
+ > Towards Robust Monocular Depth Estimation: Mixing Datasets for Zero-shot Cross-dataset Transfer
16
+ > René Ranftl, Katrin Lasinger, David Hafner, Konrad Schindler, Vladlen Koltun
17
+
18
+ ### Install TensorFlow
19
+
20
+ Set default python version to python3:
21
+
22
+ ```
23
+ echo 'export PATH=/usr/local/opt/python/libexec/bin:$PATH' >> ~/.zshenv
24
+ echo 'alias python=python3' >> ~/.zshenv
25
+ echo 'alias pip=pip3' >> ~/.zshenv
26
+ ```
27
+
28
+ Install TensorFlow
29
+
30
+ ```shell
31
+ pip install tensorflow
32
+ ```
33
+
34
+ ### Install TensorFlowLiteSwift via Cocoapods
35
+
36
+ Set required TensorFlowLiteSwift version in the file (`0.0.1-nightly` is recommended): https://github.com/isl-org/MiDaS/blob/master/mobile/ios/Podfile#L9
37
+
38
+ Install: brew, ruby, cocoapods
39
+
40
+ ```
41
+ ruby -e "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)"
42
+ brew install mc rbenv ruby-build
43
+ sudo gem install cocoapods
44
+ ```
45
+
46
+
47
+ The TensorFlowLiteSwift library is available in [Cocoapods](https://cocoapods.org/), to integrate it to our project, we can run in the root directory of the project:
48
+
49
+ ```ruby
50
+ pod install
51
+ ```
52
+
53
+ Now open the `Midas.xcworkspace` file in XCode, select your iPhone device (XCode->Product->Destination->iPhone) and launch it (cmd + R). If everything works well, you should see a real-time depth map from your camera.
54
+
55
+ ### Model
56
+
57
+ The TensorFlow (TFlite) model `midas.tflite` is in the folder `/Midas/Model`
58
+
59
+
60
+ To use another model, you should convert it from TensorFlow saved-model to TFlite model (so that it can be deployed):
61
+
62
+ ```python
63
+ saved_model_export_dir = "./saved_model"
64
+ converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_export_dir)
65
+ tflite_model = converter.convert()
66
+ open("model.tflite", "wb").write(tflite_model)
67
+ ```
68
+
69
+ ### Setup XCode
70
+
71
+ * Open directory `.xcworkspace` from the XCode
72
+
73
+ * Press on your ProjectName (left-top corner) -> change Bundle Identifier to `com.midas.tflite-npu` or something like this (it should be unique)
74
+
75
+ * Select your Developer Team (you should be signed in with your Apple ID)
76
+
77
+ * Connect your iPhone (if you want to run it on real device instead of simulator), select your iPhone device (XCode->Product->Destination->iPhone)
78
+
79
+ * Click in the XCode: Product -> Run
80
+
81
+ * On your iPhone device go to the: Settings -> General -> Device Management (or Profiles) -> Apple Development -> Trust Apple Development
82
+
83
+ ----
84
+
85
+ Original repository: https://github.com/isl-org/MiDaS
86
+
87
+
88
+ ### Examples:
89
+
90
+ | ![photo_2020-09-27_17-43-20](https://user-images.githubusercontent.com/4096485/94367804-9610de80-00e9-11eb-8a23-8b32a6f52d41.jpg) | ![photo_2020-09-27_17-49-22](https://user-images.githubusercontent.com/4096485/94367974-7201cd00-00ea-11eb-8e0a-68eb9ea10f63.jpg) | ![photo_2020-09-27_17-52-30](https://user-images.githubusercontent.com/4096485/94367976-729a6380-00ea-11eb-8ce0-39d3e26dd550.jpg) | ![photo_2020-09-27_17-43-21](https://user-images.githubusercontent.com/4096485/94367807-97420b80-00e9-11eb-9dcd-848ad9e89e03.jpg) |
91
+ |---|---|---|---|
92
+
93
+ ## LICENSE
94
+
95
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
96
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
97
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
98
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
99
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
100
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
101
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
102
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
103
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
104
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
105
+ POSSIBILITY OF SUCH DAMAGE.
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/mobile/ios/RunScripts/download_models.sh ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash
# Fetch the TF Lite model from the MiDaS release page, unless a local copy
# already exists under Midas/Model.

TFLITE_MODEL="model_opt.tflite"
TFLITE_FILE="Midas/Model/${TFLITE_MODEL}"
MODEL_SRC="https://github.com/isl-org/MiDaS/releases/download/v2/${TFLITE_MODEL}"

if [ -f "${TFLITE_FILE}" ]; then
    echo "INFO: TF Lite model already exists. Skip downloading and use the local model."
else
    # --create-dirs makes the Midas/Model directory if it is missing.
    curl --create-dirs -o "${TFLITE_FILE}" -LJO "${MODEL_SRC}"
    echo "INFO: Downloaded TensorFlow Lite model to ${TFLITE_FILE}."
fi
14
+
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/base_models/midas_repo/utils.py ADDED
@@ -0,0 +1,199 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Utils for monoDepth.
2
+ """
3
+ import sys
4
+ import re
5
+ import numpy as np
6
+ import cv2
7
+ import torch
8
+
9
+
10
def read_pfm(path):
    """Read pfm file.

    Args:
        path (str): path to file

    Returns:
        tuple: (data, scale)
    """
    with open(path, "rb") as pfm_file:
        # Header token distinguishes color ("PF") from greyscale ("Pf").
        header = pfm_file.readline().rstrip()
        if header.decode("ascii") == "PF":
            color = True
        elif header.decode("ascii") == "Pf":
            color = False
        else:
            raise Exception("Not a PFM file: " + path)

        dim_match = re.match(r"^(\d+)\s(\d+)\s$", pfm_file.readline().decode("ascii"))
        if not dim_match:
            raise Exception("Malformed PFM header.")
        width, height = map(int, dim_match.groups())

        # A negative scale marks little-endian sample data.
        scale = float(pfm_file.readline().decode("ascii").rstrip())
        if scale < 0:
            endian = "<"  # little-endian
            scale = -scale
        else:
            endian = ">"  # big-endian

        samples = np.fromfile(pfm_file, endian + "f")
        shape = (height, width, 3) if color else (height, width)

        # PFM stores rows bottom-to-top; flip into conventional order.
        data = np.flipud(np.reshape(samples, shape))

    return data, scale
57
+
58
+
59
def write_pfm(path, image, scale=1):
    """Write pfm file.

    Args:
        path (str): path to file
        image (array): data (must be float32)
        scale (int, optional): Scale. Defaults to 1.

    Raises:
        Exception: if dtype is not float32 or the shape is not
            H x W, H x W x 1 or H x W x 3.
    """

    with open(path, "wb") as file:
        if image.dtype.name != "float32":
            raise Exception("Image dtype must be float32.")

        # PFM stores rows bottom-to-top, so flip vertically before writing.
        image = np.flipud(image)

        if len(image.shape) == 3 and image.shape[2] == 3:  # color image
            color = True
        elif (
            len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1
        ):  # greyscale
            color = False
        else:
            raise Exception("Image must have H x W x 3, H x W x 1 or H x W dimensions.")

        # BUG FIX: previously `.encode()` bound only to "Pf\n" due to operator
        # precedence, so writing a color image passed a str to a binary file
        # and raised TypeError. Parenthesize the conditional before encoding.
        file.write(("PF\n" if color else "Pf\n").encode())
        file.write("%d %d\n".encode() % (image.shape[1], image.shape[0]))

        endian = image.dtype.byteorder

        # A negative scale signals little-endian sample data in PFM.
        if endian == "<" or endian == "=" and sys.byteorder == "little":
            scale = -scale

        file.write("%f\n".encode() % scale)

        image.tofile(file)
96
+
97
+
98
def read_image(path):
    """Read image and output RGB image (0-1).

    Args:
        path (str): path to file

    Returns:
        array: RGB image (0-1)
    """
    # cv2 loads BGR (or a single channel for greyscale files).
    img = cv2.imread(path)

    if img.ndim == 2:
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)

    # Convert BGR -> RGB and scale into [0, 1].
    return cv2.cvtColor(img, cv2.COLOR_BGR2RGB) / 255.0
115
+
116
+
117
def resize_image(img):
    """Resize image and make it fit for network.

    Args:
        img (array): image (H x W x C)

    Returns:
        tensor: data ready for network (1 x C x H' x W')
    """
    height_orig, width_orig = img.shape[0], img.shape[1]

    # Scale so the longer side maps to 384 pixels...
    scale = width_orig / 384 if width_orig > height_orig else height_orig / 384

    # ...then round each side up to the next multiple of 32.
    height = (np.ceil(height_orig / scale / 32) * 32).astype(int)
    width = (np.ceil(width_orig / scale / 32) * 32).astype(int)

    resized = cv2.resize(img, (width, height), interpolation=cv2.INTER_AREA)

    # HWC -> CHW, then add a batch dimension.
    tensor = torch.from_numpy(np.transpose(resized, (2, 0, 1))).contiguous().float()
    return tensor.unsqueeze(0)
145
+
146
+
147
def resize_depth(depth, width, height):
    """Resize depth map and bring to CPU (numpy).

    Args:
        depth (tensor): depth
        width (int): image width
        height (int): image height

    Returns:
        array: processed depth
    """
    # Drop batch/channel axes and move the tensor to host memory first.
    depth_cpu = torch.squeeze(depth[0, :, :, :]).to("cpu")

    return cv2.resize(
        depth_cpu.numpy(), (width, height), interpolation=cv2.INTER_CUBIC
    )
165
+
166
def write_depth(path, depth, grayscale, bits=1):
    """Write depth map to png file.

    Args:
        path (str): filepath without extension
        depth (array): depth
        grayscale (bool): use a grayscale colormap?
        bits (int, optional): 1 for 8-bit output, 2 for 16-bit. Defaults to 1.
    """
    # Color-mapped output is always written as 8-bit.
    if not grayscale:
        bits = 1

    if not np.isfinite(depth).all():
        depth = np.nan_to_num(depth, nan=0.0, posinf=0.0, neginf=0.0)
        print("WARNING: Non-finite depth values present")

    depth_min = depth.min()
    depth_max = depth.max()
    max_val = (2 ** (8 * bits)) - 1

    # Normalize into [0, max_val]; a constant map becomes all zeros.
    if depth_max - depth_min > np.finfo("float").eps:
        out = max_val * (depth - depth_min) / (depth_max - depth_min)
    else:
        out = np.zeros(depth.shape, dtype=depth.dtype)

    if not grayscale:
        out = cv2.applyColorMap(np.uint8(out), cv2.COLORMAP_INFERNO)

    if bits == 1:
        cv2.imwrite(path + ".png", out.astype("uint8"))
    elif bits == 2:
        cv2.imwrite(path + ".png", out.astype("uint16"))

    return
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/builder.py ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # MIT License
2
+
3
+ # Copyright (c) 2022 Intelligent Systems Lab Org
4
+
5
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ # of this software and associated documentation files (the "Software"), to deal
7
+ # in the Software without restriction, including without limitation the rights
8
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ # copies of the Software, and to permit persons to whom the Software is
10
+ # furnished to do so, subject to the following conditions:
11
+
12
+ # The above copyright notice and this permission notice shall be included in all
13
+ # copies or substantial portions of the Software.
14
+
15
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ # SOFTWARE.
22
+
23
+ # File author: Shariq Farooq Bhat
24
+
25
+ from importlib import import_module
26
+ from .depth_model import DepthModel
27
+
28
def build_model(config) -> DepthModel:
    """Builds a model from a config. The model is specified by the model name and version in the config. The model is then constructed using the build_from_config function of the model interface.
    This function should be used to construct models for training and evaluation.

    Args:
        config (dict): Config dict. Config is constructed in utils/config.py. Each model has its own config file(s) saved in its root model folder.

    Returns:
        torch.nn.Module: Model corresponding to name and version as specified in config
    """
    target_module = f"zoedepth.models.{config.model}"
    try:
        model_module = import_module(target_module)
    except ModuleNotFoundError as import_err:
        # Surface the underlying import failure before translating it.
        print(import_err)
        raise ValueError(
            f"Model {config.model} not found. Refer above error for details.") from import_err
    try:
        get_version = getattr(model_module, "get_version")
    except AttributeError as attr_err:
        raise ValueError(
            f"Model {config.model} has no get_version function.") from attr_err
    return get_version(config.version_name).build_from_config(config)
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/depth_model.py ADDED
@@ -0,0 +1,152 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # MIT License
2
+
3
+ # Copyright (c) 2022 Intelligent Systems Lab Org
4
+
5
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ # of this software and associated documentation files (the "Software"), to deal
7
+ # in the Software without restriction, including without limitation the rights
8
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ # copies of the Software, and to permit persons to whom the Software is
10
+ # furnished to do so, subject to the following conditions:
11
+
12
+ # The above copyright notice and this permission notice shall be included in all
13
+ # copies or substantial portions of the Software.
14
+
15
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ # SOFTWARE.
22
+
23
+ # File author: Shariq Farooq Bhat
24
+
25
+ import numpy as np
26
+ import torch
27
+ import torch.nn as nn
28
+ import torch.nn.functional as F
29
+ from torchvision import transforms
30
+ import PIL.Image
31
+ from PIL import Image
32
+ from typing import Union
33
+
34
+
35
class DepthModel(nn.Module):
    """Base class for metric depth models.

    Subclasses implement `forward` (returning a dict with a 'metric_depth'
    entry); this class layers padding / horizontal-flip test-time
    augmentation and a PIL convenience wrapper on top of it.
    """

    def __init__(self):
        super().__init__()
        # Tracked manually so infer_pil can move inputs to the right device.
        self.device = 'cpu'
    
    def to(self, device) -> nn.Module:
        # Remember the target device before delegating to nn.Module.to.
        self.device = device
        return super().to(device)
    
    def forward(self, x, *args, **kwargs):
        # Must be provided by subclasses; see _infer for the expected output.
        raise NotImplementedError
    
    def _infer(self, x: torch.Tensor):
        """
        Inference interface for the model
        Args:
            x (torch.Tensor): input tensor of shape (b, c, h, w)
        Returns:
            torch.Tensor: output tensor of shape (b, 1, h, w)
        """
        return self(x)['metric_depth']
    
    def _infer_with_pad_aug(self, x: torch.Tensor, pad_input: bool=True, fh: float=3, fw: float=3, upsampling_mode: str='bicubic', padding_mode="reflect", **kwargs) -> torch.Tensor:
        """
        Inference interface for the model with padding augmentation
        Padding augmentation fixes the boundary artifacts in the output depth map.
        Boundary artifacts are sometimes caused by the fact that the model is trained on NYU raw dataset which has a black or white border around the image.
        This augmentation pads the input image and crops the prediction back to the original size / view.

        Note: This augmentation is not required for the models trained with 'avoid_boundary'=True.
        Args:
            x (torch.Tensor): input tensor of shape (b, c, h, w)
            pad_input (bool, optional): whether to pad the input or not. Defaults to True.
            fh (float, optional): height padding factor. The padding is calculated as sqrt(h/2) * fh. Defaults to 3.
            fw (float, optional): width padding factor. The padding is calculated as sqrt(w/2) * fw. Defaults to 3.
            upsampling_mode (str, optional): upsampling mode. Defaults to 'bicubic'.
            padding_mode (str, optional): padding mode. Defaults to "reflect".
        Returns:
            torch.Tensor: output tensor of shape (b, 1, h, w)
        """
        # assert x is nchw and c = 3
        assert x.dim() == 4, "x must be 4 dimensional, got {}".format(x.dim())
        assert x.shape[1] == 3, "x must have 3 channels, got {}".format(x.shape[1])

        if pad_input:
            assert fh > 0 or fw > 0, "atlease one of fh and fw must be greater than 0"
            # Padding grows with sqrt of the image size, scaled by fh/fw.
            pad_h = int(np.sqrt(x.shape[2]/2) * fh)
            pad_w = int(np.sqrt(x.shape[3]/2) * fw)
            padding = [pad_w, pad_w]
            if pad_h > 0:
                padding += [pad_h, pad_h]
            
            x = F.pad(x, padding, mode=padding_mode, **kwargs)
        out = self._infer(x)
        # Upsample the prediction back to the (possibly padded) input size.
        if out.shape[-2:] != x.shape[-2:]:
            out = F.interpolate(out, size=(x.shape[2], x.shape[3]), mode=upsampling_mode, align_corners=False)
        if pad_input:
            # crop to the original size, handling the case where pad_h and pad_w is 0
            if pad_h > 0:
                out = out[:, :, pad_h:-pad_h,:]
            if pad_w > 0:
                out = out[:, :, :, pad_w:-pad_w]
        return out
    
    def infer_with_flip_aug(self, x, pad_input: bool=True, **kwargs) -> torch.Tensor:
        """
        Inference interface for the model with horizontal flip augmentation
        Horizontal flip augmentation improves the accuracy of the model by averaging the output of the model with and without horizontal flip.
        Args:
            x (torch.Tensor): input tensor of shape (b, c, h, w)
            pad_input (bool, optional): whether to use padding augmentation. Defaults to True.
        Returns:
            torch.Tensor: output tensor of shape (b, 1, h, w)
        """
        # infer with horizontal flip and average
        out = self._infer_with_pad_aug(x, pad_input=pad_input, **kwargs)
        out_flip = self._infer_with_pad_aug(torch.flip(x, dims=[3]), pad_input=pad_input, **kwargs)
        # Flip the second prediction back before averaging the two.
        out = (out + torch.flip(out_flip, dims=[3])) / 2
        return out
    
    def infer(self, x, pad_input: bool=True, with_flip_aug: bool=True, **kwargs) -> torch.Tensor:
        """
        Inference interface for the model
        Args:
            x (torch.Tensor): input tensor of shape (b, c, h, w)
            pad_input (bool, optional): whether to use padding augmentation. Defaults to True.
            with_flip_aug (bool, optional): whether to use horizontal flip augmentation. Defaults to True.
        Returns:
            torch.Tensor: output tensor of shape (b, 1, h, w)
        """
        if with_flip_aug:
            return self.infer_with_flip_aug(x, pad_input=pad_input, **kwargs)
        else:
            return self._infer_with_pad_aug(x, pad_input=pad_input, **kwargs)
    
    @torch.no_grad()
    def infer_pil(self, pil_img, pad_input: bool=True, with_flip_aug: bool=True, output_type: str="numpy", **kwargs) -> Union[np.ndarray, PIL.Image.Image, torch.Tensor]:
        """
        Inference interface for the model for PIL image
        Args:
            pil_img (PIL.Image.Image): input PIL image
            pad_input (bool, optional): whether to use padding augmentation. Defaults to True.
            with_flip_aug (bool, optional): whether to use horizontal flip augmentation. Defaults to True.
            output_type (str, optional): output type. Supported values are 'numpy', 'pil' and 'tensor'. Defaults to "numpy".

        Raises:
            ValueError: if `output_type` is not one of the supported values.
        """
        x = transforms.ToTensor()(pil_img).unsqueeze(0).to(self.device)
        out_tensor = self.infer(x, pad_input=pad_input, with_flip_aug=with_flip_aug, **kwargs)
        if output_type == "numpy":
            return out_tensor.squeeze().cpu().numpy()
        elif output_type == "pil":
            # uint16 is required for depth pil image
            out_16bit_numpy = (out_tensor.squeeze().cpu().numpy()*256).astype(np.uint16)
            return Image.fromarray(out_16bit_numpy)
        elif output_type == "tensor":
            return out_tensor.squeeze().cpu()
        else:
            raise ValueError(f"output_type {output_type} not supported. Supported values are 'numpy', 'pil' and 'tensor'")
152
+
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/model_io.py ADDED
@@ -0,0 +1,92 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # MIT License
2
+
3
+ # Copyright (c) 2022 Intelligent Systems Lab Org
4
+
5
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ # of this software and associated documentation files (the "Software"), to deal
7
+ # in the Software without restriction, including without limitation the rights
8
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ # copies of the Software, and to permit persons to whom the Software is
10
+ # furnished to do so, subject to the following conditions:
11
+
12
+ # The above copyright notice and this permission notice shall be included in all
13
+ # copies or substantial portions of the Software.
14
+
15
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ # SOFTWARE.
22
+
23
+ # File author: Shariq Farooq Bhat
24
+
25
+ import torch
26
+
27
def load_state_dict(model, state_dict):
    """Load state_dict into model, handling DataParallel and DistributedDataParallel. Also checks for "model" key in state_dict.

    DataParallel prefixes state_dict keys with 'module.' when saving.
    If the model is not a DataParallel model but the state_dict is, then prefixes are removed.
    If the model is a DataParallel model but the state_dict is not, then prefixes are added.
    """
    # Some checkpoints wrap the weights as {"model": state_dict}; unwrap if so.
    state_dict = state_dict.get('model', state_dict)

    wrapped = isinstance(
        model, (torch.nn.DataParallel, torch.nn.parallel.DistributedDataParallel))

    def _rekey(key):
        # Reconcile the 'module.' prefix in whichever direction is needed.
        has_prefix = key.startswith('module.')
        if has_prefix and not wrapped:
            return key[len('module.'):]
        if wrapped and not has_prefix:
            return 'module.' + key
        return key

    model.load_state_dict({_rekey(k): v for k, v in state_dict.items()})
    print("Loaded successfully")
    return model
52
+
53
+
54
def load_wts(model, checkpoint_path):
    """Load the checkpoint file at *checkpoint_path* (CPU-mapped) into *model*."""
    checkpoint = torch.load(checkpoint_path, map_location='cpu')
    return load_state_dict(model, checkpoint)
57
+
58
+
59
def load_state_dict_from_url(model, url, **kwargs):
    """Download a checkpoint from *url* (CPU-mapped) and load it into *model*."""
    remote_sd = torch.hub.load_state_dict_from_url(url, map_location='cpu', **kwargs)
    return load_state_dict(model, remote_sd)
62
+
63
+
64
def load_state_from_resource(model, resource: str):
    """Loads weights to the model from a given resource. A resource can be of following types:
        1. URL. Prefixed with "url::"
                e.g. url::http(s)://url.resource.com/ckpt.pt

        2. Local path. Prefixed with "local::"
                e.g. local::/path/to/ckpt.pt

    Args:
        model (torch.nn.Module): Model
        resource (str): resource string

    Returns:
        torch.nn.Module: Model with loaded weights
    """
    print(f"Using pretrained resource {resource}")

    if resource.startswith('url::'):
        return load_state_dict_from_url(model, resource.split('url::')[1], progress=True)

    if resource.startswith('local::'):
        return load_wts(model, resource.split('local::')[1])

    raise ValueError("Invalid resource type, only url:: and local:: are supported")
92
+
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/zoedepth/__init__.py ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # MIT License
2
+
3
+ # Copyright (c) 2022 Intelligent Systems Lab Org
4
+
5
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ # of this software and associated documentation files (the "Software"), to deal
7
+ # in the Software without restriction, including without limitation the rights
8
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ # copies of the Software, and to permit persons to whom the Software is
10
+ # furnished to do so, subject to the following conditions:
11
+
12
+ # The above copyright notice and this permission notice shall be included in all
13
+ # copies or substantial portions of the Software.
14
+
15
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ # SOFTWARE.
22
+
23
+ # File author: Shariq Farooq Bhat
24
+
25
from .zoedepth_v1 import ZoeDepth

# Registry mapping version-name strings to ZoeDepth model classes.
all_versions = {
    "v1": ZoeDepth,
}


def get_version(version_name):
    """Return the model class registered under *version_name*.

    Raises:
        KeyError: if *version_name* is not a known version.
    """
    # PEP 8 (E731): a def is preferred over assigning a lambda to a name.
    return all_versions[version_name]
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/zoedepth/__pycache__/__init__.cpython-39.pyc ADDED
Binary file (377 Bytes). View file
 
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/zoedepth/__pycache__/zoedepth_v1.cpython-39.pyc ADDED
Binary file (9.16 kB). View file
 
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/zoedepth/config_zoedepth.json ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "model": {
3
+ "name": "ZoeDepth",
4
+ "version_name": "v1",
5
+ "n_bins": 64,
6
+ "bin_embedding_dim": 128,
7
+ "bin_centers_type": "softplus",
8
+ "n_attractors":[16, 8, 4, 1],
9
+ "attractor_alpha": 1000,
10
+ "attractor_gamma": 2,
11
+ "attractor_kind" : "mean",
12
+ "attractor_type" : "inv",
13
+ "midas_model_type" : "DPT_BEiT_L_384",
14
+ "min_temp": 0.0212,
15
+ "max_temp": 50.0,
16
+ "output_distribution": "logbinomial",
17
+ "memory_efficient": true,
18
+ "inverse_midas": false,
19
+ "img_size": [384, 512]
20
+ },
21
+
22
+ "train": {
23
+ "train_midas": true,
24
+ "use_pretrained_midas": true,
25
+ "trainer": "zoedepth",
26
+ "epochs": 5,
27
+ "bs": 16,
28
+ "optim_kwargs": {"lr": 0.000161, "wd": 0.01},
29
+ "sched_kwargs": {"div_factor": 1, "final_div_factor": 10000, "pct_start": 0.7, "three_phase":false, "cycle_momentum": true},
30
+ "same_lr": false,
31
+ "w_si": 1,
32
+ "w_domain": 0.2,
33
+ "w_reg": 0,
34
+ "w_grad": 0,
35
+ "avoid_boundary": false,
36
+ "random_crop": false,
37
+ "input_width": 640,
38
+ "input_height": 480,
39
+ "midas_lr_factor": 1,
40
+ "encoder_lr_factor":10,
41
+ "pos_enc_lr_factor":10,
42
+ "freeze_midas_bn": true
43
+
44
+ },
45
+
46
+ "infer":{
47
+ "train_midas": false,
48
+ "use_pretrained_midas": false,
49
+ "pretrained_resource" : null,
50
+ "force_keep_ar": true
51
+ },
52
+
53
+ "eval":{
54
+ "train_midas": false,
55
+ "use_pretrained_midas": false,
56
+ "pretrained_resource" : null
57
+ }
58
+ }
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/zoedepth/config_zoedepth_kitti.json ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "model": {
3
+ "bin_centers_type": "normed",
4
+ "img_size": [384, 768]
5
+ },
6
+
7
+ "train": {
8
+ },
9
+
10
+ "infer":{
11
+ "train_midas": false,
12
+ "use_pretrained_midas": false,
13
+ "pretrained_resource" : "url::https://github.com/isl-org/ZoeDepth/releases/download/v1.0/ZoeD_M12_K.pt",
14
+ "force_keep_ar": true
15
+ },
16
+
17
+ "eval":{
18
+ "train_midas": false,
19
+ "use_pretrained_midas": false,
20
+ "pretrained_resource" : "url::https://github.com/isl-org/ZoeDepth/releases/download/v1.0/ZoeD_M12_K.pt"
21
+ }
22
+ }
CCEdit-main/src/controlnet11/annotator/zoe/zoedepth/models/zoedepth/zoedepth_v1.py ADDED
@@ -0,0 +1,250 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # MIT License
2
+
3
+ # Copyright (c) 2022 Intelligent Systems Lab Org
4
+
5
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ # of this software and associated documentation files (the "Software"), to deal
7
+ # in the Software without restriction, including without limitation the rights
8
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ # copies of the Software, and to permit persons to whom the Software is
10
+ # furnished to do so, subject to the following conditions:
11
+
12
+ # The above copyright notice and this permission notice shall be included in all
13
+ # copies or substantial portions of the Software.
14
+
15
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ # SOFTWARE.
22
+
23
+ # File author: Shariq Farooq Bhat
24
+
25
+ import itertools
26
+
27
+ import torch
28
+ import torch.nn as nn
29
+ from ..depth_model import DepthModel
30
+ from ..base_models.midas import MidasCore
31
+ from ..layers.attractor import AttractorLayer, AttractorLayerUnnormed
32
+ from ..layers.dist_layers import ConditionalLogBinomial
33
+ from ..layers.localbins_layers import (Projector, SeedBinRegressor,
34
+ SeedBinRegressorUnnormed)
35
+ from ..model_io import load_state_from_resource
36
+
37
+
38
class ZoeDepth(DepthModel):
    """ZoeDepth metric-depth model with a single metric head.

    Wraps a MidasCore relative-depth backbone and converts its features into
    metric depth via adaptive bins: a seed bin regressor proposes initial bin
    centers, attractor layers refine them at each decoder scale, and a
    conditional log-binomial head yields per-pixel probabilities over bins.
    """

    def __init__(self, core, n_bins=64, bin_centers_type="softplus", bin_embedding_dim=128, min_depth=1e-3, max_depth=10,
                 n_attractors=[16, 8, 4, 1], attractor_alpha=300, attractor_gamma=2, attractor_kind='sum', attractor_type='exp', min_temp=5, max_temp=50, train_midas=True,
                 midas_lr_factor=10, encoder_lr_factor=10, pos_enc_lr_factor=10, inverse_midas=False, **kwargs):
        """ZoeDepth model. This is the version of ZoeDepth that has a single metric head

        Args:
            core (models.base_models.midas.MidasCore): The base midas model that is used for extraction of "relative" features
            n_bins (int, optional): Number of bin centers. Defaults to 64.
            bin_centers_type (str, optional): "normed" or "softplus". Activation type used for bin centers. For "normed" bin centers, linear normalization trick is applied. This results in bounded bin centers.
                                              For "softplus", softplus activation is used and thus are unbounded. Defaults to "softplus".
            bin_embedding_dim (int, optional): bin embedding dimension. Defaults to 128.
            min_depth (float, optional): Lower bound for normed bin centers. Defaults to 1e-3.
            max_depth (float, optional): Upper bound for normed bin centers. Defaults to 10.
            n_attractors (List[int], optional): Number of bin attractors at decoder layers. Defaults to [16, 8, 4, 1].
            attractor_alpha (int, optional): Proportional attractor strength. Refer to models.layers.attractor for more details. Defaults to 300.
            attractor_gamma (int, optional): Exponential attractor strength. Refer to models.layers.attractor for more details. Defaults to 2.
            attractor_kind (str, optional): Attraction aggregation "sum" or "mean". Defaults to 'sum'.
            attractor_type (str, optional): Type of attractor to use; "inv" (Inverse attractor) or "exp" (Exponential attractor). Defaults to 'exp'.
            min_temp (int, optional): Lower bound for temperature of output probability distribution. Defaults to 5.
            max_temp (int, optional): Upper bound for temperature of output probability distribution. Defaults to 50.
            train_midas (bool, optional): Whether to train "core", the base midas model. Defaults to True.
            midas_lr_factor (int, optional): Learning rate reduction factor for base midas model except its encoder and positional encodings. Defaults to 10.
            encoder_lr_factor (int, optional): Learning rate reduction factor for the encoder in midas model. Defaults to 10.
            pos_enc_lr_factor (int, optional): Learning rate reduction factor for positional encodings in the base midas model. Defaults to 10.
        """
        super().__init__()

        self.core = core
        self.max_depth = max_depth
        self.min_depth = min_depth
        self.min_temp = min_temp
        self.bin_centers_type = bin_centers_type

        self.midas_lr_factor = midas_lr_factor
        self.encoder_lr_factor = encoder_lr_factor
        self.pos_enc_lr_factor = pos_enc_lr_factor
        self.train_midas = train_midas
        self.inverse_midas = inverse_midas

        # A non-positive LR factor means "do not train": freeze the encoder
        # (and optionally its relative positional encodings) entirely.
        if self.encoder_lr_factor <= 0:
            self.core.freeze_encoder(
                freeze_rel_pos=self.pos_enc_lr_factor <= 0)

        # NOTE(review): presumably the channel count of the midas output conv
        # feature map (out[0] in forward) — confirm against MidasCore.
        N_MIDAS_OUT = 32
        btlnck_features = self.core.output_channels[0]
        num_out_features = self.core.output_channels[1:]

        self.conv2 = nn.Conv2d(btlnck_features, btlnck_features,
                               kernel_size=1, stride=1, padding=0)  # btlnck conv

        # Select seed-bin/attractor layer variants according to whether the
        # bin centers are normalized ("normed") or unbounded ("softplus"),
        # with the two "hybrid" modes mixing one of each.
        if bin_centers_type == "normed":
            SeedBinRegressorLayer = SeedBinRegressor
            Attractor = AttractorLayer
        elif bin_centers_type == "softplus":
            SeedBinRegressorLayer = SeedBinRegressorUnnormed
            Attractor = AttractorLayerUnnormed
        elif bin_centers_type == "hybrid1":
            SeedBinRegressorLayer = SeedBinRegressor
            Attractor = AttractorLayerUnnormed
        elif bin_centers_type == "hybrid2":
            SeedBinRegressorLayer = SeedBinRegressorUnnormed
            Attractor = AttractorLayer
        else:
            raise ValueError(
                "bin_centers_type should be one of 'normed', 'softplus', 'hybrid1', 'hybrid2'")

        self.seed_bin_regressor = SeedBinRegressorLayer(
            btlnck_features, n_bins=n_bins, min_depth=min_depth, max_depth=max_depth)
        self.seed_projector = Projector(btlnck_features, bin_embedding_dim)
        # One projector + one attractor per decoder feature scale.
        self.projectors = nn.ModuleList([
            Projector(num_out, bin_embedding_dim)
            for num_out in num_out_features
        ])
        self.attractors = nn.ModuleList([
            Attractor(bin_embedding_dim, n_bins, n_attractors=n_attractors[i], min_depth=min_depth, max_depth=max_depth,
                      alpha=attractor_alpha, gamma=attractor_gamma, kind=attractor_kind, attractor_type=attractor_type)
            for i in range(len(num_out_features))
        ])

        last_in = N_MIDAS_OUT + 1  # +1 for relative depth

        # use log binomial instead of softmax
        self.conditional_log_binomial = ConditionalLogBinomial(
            last_in, bin_embedding_dim, n_classes=n_bins, min_temp=min_temp, max_temp=max_temp)

    def forward(self, x, return_final_centers=False, denorm=False, return_probs=False, **kwargs):
        """
        Args:
            x (torch.Tensor): Input image tensor of shape (B, C, H, W)
            return_final_centers (bool, optional): Whether to return the final bin centers. Defaults to False.
            denorm (bool, optional): Whether to denormalize the input image. This reverses ImageNet normalization as midas normalization is different. Defaults to False.
            return_probs (bool, optional): Whether to return the output probability distribution. Defaults to False.

        Returns:
            dict: Dictionary containing the following keys:
                - rel_depth (torch.Tensor): Relative depth map of shape (B, H, W)
                - metric_depth (torch.Tensor): Metric depth map of shape (B, 1, H, W)
                - bin_centers (torch.Tensor): Bin centers of shape (B, n_bins). Present only if return_final_centers is True
                - probs (torch.Tensor): Output probability distribution of shape (B, n_bins, H, W). Present only if return_probs is True

        """
        b, c, h, w = x.shape
        # print("input shape ", x.shape)
        self.orig_input_width = w
        self.orig_input_height = h
        rel_depth, out = self.core(x, denorm=denorm, return_rel_depth=True)
        # print("output shapes", rel_depth.shape, out.shape)

        # out[0]: midas output conv activation; out[1]: bottleneck features;
        # out[2:]: decoder features at successively finer scales.
        outconv_activation = out[0]
        btlnck = out[1]
        x_blocks = out[2:]

        x_d0 = self.conv2(btlnck)
        x = x_d0
        _, seed_b_centers = self.seed_bin_regressor(x)

        # Normalized-center variants work in [0, 1]; rescale seed centers.
        if self.bin_centers_type == 'normed' or self.bin_centers_type == 'hybrid2':
            b_prev = (seed_b_centers - self.min_depth) / \
                (self.max_depth - self.min_depth)
        else:
            b_prev = seed_b_centers

        prev_b_embedding = self.seed_projector(x)

        # unroll this loop for better performance
        # NOTE: the loop variables `b` and `x` shadow the batch size and input
        # tensor bound above; both are only needed before this point.
        for projector, attractor, x in zip(self.projectors, self.attractors, x_blocks):
            b_embedding = projector(x)
            b, b_centers = attractor(
                b_embedding, b_prev, prev_b_embedding, interpolate=True)
            b_prev = b.clone()
            prev_b_embedding = b_embedding.clone()

        last = outconv_activation

        if self.inverse_midas:
            # invert depth followed by normalization
            rel_depth = 1.0 / (rel_depth + 1e-6)
            rel_depth = (rel_depth - rel_depth.min()) / \
                (rel_depth.max() - rel_depth.min())
        # concat rel depth with last. First interpolate rel depth to last size
        rel_cond = rel_depth.unsqueeze(1)
        rel_cond = nn.functional.interpolate(
            rel_cond, size=last.shape[2:], mode='bilinear', align_corners=True)
        last = torch.cat([last, rel_cond], dim=1)

        b_embedding = nn.functional.interpolate(
            b_embedding, last.shape[-2:], mode='bilinear', align_corners=True)
        x = self.conditional_log_binomial(last, b_embedding)

        # Now depth value is Sum px * cx , where cx are bin_centers from the last bin tensor
        # print(x.shape, b_centers.shape)
        b_centers = nn.functional.interpolate(
            b_centers, x.shape[-2:], mode='bilinear', align_corners=True)
        out = torch.sum(x * b_centers, dim=1, keepdim=True)

        # Structure output dict
        output = dict(metric_depth=out)
        if return_final_centers or return_probs:
            output['bin_centers'] = b_centers

        if return_probs:
            output['probs'] = x

        return output

    def get_lr_params(self, lr):
        """
        Learning rate configuration for different layers of the model
        Args:
            lr (float) : Base learning rate
        Returns:
            list : list of parameters to optimize and their learning rates, in the format required by torch optimizers.
        """
        param_conf = []
        if self.train_midas:
            # Encoder / positional-encoding groups are only added when their
            # LR factor is positive (otherwise they were frozen in __init__).
            if self.encoder_lr_factor > 0:
                param_conf.append({'params': self.core.get_enc_params_except_rel_pos(
                ), 'lr': lr / self.encoder_lr_factor})

            if self.pos_enc_lr_factor > 0:
                param_conf.append(
                    {'params': self.core.get_rel_pos_params(), 'lr': lr / self.pos_enc_lr_factor})

            midas_params = self.core.core.scratch.parameters()
            midas_lr_factor = self.midas_lr_factor
            param_conf.append(
                {'params': midas_params, 'lr': lr / midas_lr_factor})

        # Everything except the midas core trains at the base learning rate.
        remaining_modules = []
        for name, child in self.named_children():
            if name != 'core':
                remaining_modules.append(child)
        remaining_params = itertools.chain(
            *[child.parameters() for child in remaining_modules])

        param_conf.append({'params': remaining_params, 'lr': lr})

        return param_conf

    @staticmethod
    def build(midas_model_type="DPT_BEiT_L_384", pretrained_resource=None, use_pretrained_midas=False, train_midas=False, freeze_midas_bn=True, **kwargs):
        """Construct a ZoeDepth model around a freshly built MidasCore and
        optionally load pretrained weights from *pretrained_resource*
        (a "url::..." or "local::..." string; see model_io)."""
        core = MidasCore.build(midas_model_type=midas_model_type, use_pretrained_midas=use_pretrained_midas,
                               train_midas=train_midas, fetch_features=True, freeze_bn=freeze_midas_bn, **kwargs)
        model = ZoeDepth(core, **kwargs)
        if pretrained_resource:
            assert isinstance(pretrained_resource, str), "pretrained_resource must be a string"
            model = load_state_from_resource(model, pretrained_resource)
        return model

    @staticmethod
    def build_from_config(config):
        """Build a ZoeDepth model from a config dict (keys become build() kwargs)."""
        return ZoeDepth.build(**config)
CCEdit-main/src/controlnet11/gradio_annotator.py ADDED
@@ -0,0 +1,376 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+
3
+ from annotator.util import resize_image, HWC3
4
+
5
+
6
# Lazily-constructed Canny detector (shared across calls).
model_canny = None


def canny(img, res, l, h):
    """Detect Canny edges with thresholds *l*/*h*; returns a one-image list."""
    global model_canny
    if model_canny is None:
        from annotator.canny import CannyDetector
        model_canny = CannyDetector()
    preprocessed = resize_image(HWC3(img), res)
    return [model_canny(preprocessed, l, h)]
17
+
18
+
19
# Lazily-constructed HED detector (shared across calls).
model_hed = None


def hed(img, res):
    """Detect HED soft edges; returns a one-image list."""
    global model_hed
    if model_hed is None:
        from annotator.hed import HEDdetector
        model_hed = HEDdetector()
    preprocessed = resize_image(HWC3(img), res)
    return [model_hed(preprocessed)]
30
+
31
+
32
# Lazily-constructed PidiNet detector (shared across calls).
model_pidi = None


def pidi(img, res):
    """Detect PidiNet soft edges; returns a one-image list."""
    global model_pidi
    if model_pidi is None:
        from annotator.pidinet import PidiNetDetector
        model_pidi = PidiNetDetector()
    preprocessed = resize_image(HWC3(img), res)
    return [model_pidi(preprocessed)]
43
+
44
+
45
# Lazily-constructed MLSD line detector (shared across calls).
model_mlsd = None


def mlsd(img, res, thr_v, thr_d):
    """Detect straight lines with MLSD (value/distance thresholds); returns a one-image list."""
    global model_mlsd
    if model_mlsd is None:
        from annotator.mlsd import MLSDdetector
        model_mlsd = MLSDdetector()
    preprocessed = resize_image(HWC3(img), res)
    return [model_mlsd(preprocessed, thr_v, thr_d)]
56
+
57
+
58
# Lazily-constructed MiDaS depth estimator (shared across calls).
model_midas = None


def midas(img, res):
    """Estimate relative depth with MiDaS; returns a one-image list."""
    global model_midas
    if model_midas is None:
        from annotator.midas import MidasDetector
        model_midas = MidasDetector()
    preprocessed = resize_image(HWC3(img), res)
    return [model_midas(preprocessed)]
69
+
70
+
71
# Lazily-constructed ZoeDepth estimator (shared across calls).
model_zoe = None


def zoe(img, res):
    """Estimate depth with ZoeDepth; returns a one-image list."""
    global model_zoe
    if model_zoe is None:
        from annotator.zoe import ZoeDetector
        model_zoe = ZoeDetector()
    preprocessed = resize_image(HWC3(img), res)
    return [model_zoe(preprocessed)]
82
+
83
+
84
# Lazily-constructed NormalBae surface-normal estimator (shared across calls).
model_normalbae = None


def normalbae(img, res):
    """Estimate surface normals with NormalBae; returns a one-image list."""
    global model_normalbae
    if model_normalbae is None:
        from annotator.normalbae import NormalBaeDetector
        model_normalbae = NormalBaeDetector()
    preprocessed = resize_image(HWC3(img), res)
    return [model_normalbae(preprocessed)]
95
+
96
+
97
# Lazily-constructed Openpose detector (shared across calls).
model_openpose = None


def openpose(img, res, hand_and_face):
    """Detect body pose (optionally hands/face) with Openpose; returns a one-image list."""
    global model_openpose
    if model_openpose is None:
        from annotator.openpose import OpenposeDetector
        model_openpose = OpenposeDetector()
    preprocessed = resize_image(HWC3(img), res)
    return [model_openpose(preprocessed, hand_and_face)]
108
+
109
+
110
# Lazily-constructed Uniformer segmenter (shared across calls).
model_uniformer = None


def uniformer(img, res):
    """Segment the image with Uniformer; returns a one-image list."""
    global model_uniformer
    if model_uniformer is None:
        from annotator.uniformer import UniformerDetector
        model_uniformer = UniformerDetector()
    preprocessed = resize_image(HWC3(img), res)
    return [model_uniformer(preprocessed)]
121
+
122
+
123
# Lazily-constructed anime line-art detector (shared across calls).
model_lineart_anime = None


def lineart_anime(img, res):
    """Extract anime-style line art; returns a one-image list."""
    global model_lineart_anime
    if model_lineart_anime is None:
        from annotator.lineart_anime import LineartAnimeDetector
        model_lineart_anime = LineartAnimeDetector()
    preprocessed = resize_image(HWC3(img), res)
    return [model_lineart_anime(preprocessed)]
134
+
135
+
136
# Lazily-constructed line-art detector (shared across calls).
model_lineart = None


def lineart(img, res, coarse=False):
    """Extract line art (optionally with the coarse model); returns a one-image list."""
    global model_lineart
    if model_lineart is None:
        from annotator.lineart import LineartDetector
        model_lineart = LineartDetector()
    preprocessed = resize_image(HWC3(img), res)
    return [model_lineart(preprocessed, coarse)]
147
+
148
+
149
# Lazily-constructed Oneformer COCO segmenter (shared across calls).
model_oneformer_coco = None


def oneformer_coco(img, res):
    """Segment the image with Oneformer (COCO classes); returns a one-image list."""
    global model_oneformer_coco
    if model_oneformer_coco is None:
        from annotator.oneformer import OneformerCOCODetector
        model_oneformer_coco = OneformerCOCODetector()
    preprocessed = resize_image(HWC3(img), res)
    return [model_oneformer_coco(preprocessed)]
160
+
161
+
162
# Lazily-constructed Oneformer ADE20K segmenter (shared across calls).
model_oneformer_ade20k = None


def oneformer_ade20k(img, res):
    """Segment the image with Oneformer (ADE20K classes); returns a one-image list."""
    global model_oneformer_ade20k
    if model_oneformer_ade20k is None:
        from annotator.oneformer import OneformerADE20kDetector
        model_oneformer_ade20k = OneformerADE20kDetector()
    preprocessed = resize_image(HWC3(img), res)
    return [model_oneformer_ade20k(preprocessed)]
173
+
174
+
175
# Lazily-constructed content-shuffle detector (shared across calls).
model_content_shuffler = None


def content_shuffler(img, res):
    """Apply content shuffle to the image; returns a one-image list."""
    global model_content_shuffler
    if model_content_shuffler is None:
        from annotator.shuffle import ContentShuffleDetector
        model_content_shuffler = ContentShuffleDetector()
    preprocessed = resize_image(HWC3(img), res)
    return [model_content_shuffler(preprocessed)]
186
+
187
+
188
# Lazily-constructed color-shuffle detector (shared across calls).
model_color_shuffler = None


def color_shuffler(img, res):
    """Apply color shuffle to the image; returns a one-image list."""
    global model_color_shuffler
    if model_color_shuffler is None:
        from annotator.shuffle import ColorShuffleDetector
        model_color_shuffler = ColorShuffleDetector()
    preprocessed = resize_image(HWC3(img), res)
    return [model_color_shuffler(preprocessed)]
199
+
200
+
201
# Gradio demo: one panel per ControlNet 1.1 annotator. Each panel takes an
# uploaded image (plus annotator-specific options), runs the matching lazily
# initialized detector above, and shows the result in a gallery.
block = gr.Blocks().queue()
with block:
    with gr.Row():
        gr.Markdown("## Canny Edge")
    with gr.Row():
        with gr.Column():
            input_image = gr.Image(source='upload', type="numpy")
            low_threshold = gr.Slider(label="low_threshold", minimum=1, maximum=255, value=100, step=1)
            high_threshold = gr.Slider(label="high_threshold", minimum=1, maximum=255, value=200, step=1)
            resolution = gr.Slider(label="resolution", minimum=256, maximum=1024, value=512, step=64)
            run_button = gr.Button(label="Run")
        with gr.Column():
            gallery = gr.Gallery(label="Generated images", show_label=False).style(height="auto")
    run_button.click(fn=canny, inputs=[input_image, resolution, low_threshold, high_threshold], outputs=[gallery])

    with gr.Row():
        gr.Markdown("## HED Edge")
    with gr.Row():
        with gr.Column():
            input_image = gr.Image(source='upload', type="numpy")
            resolution = gr.Slider(label="resolution", minimum=256, maximum=1024, value=512, step=64)
            run_button = gr.Button(label="Run")
        with gr.Column():
            gallery = gr.Gallery(label="Generated images", show_label=False).style(height="auto")
    run_button.click(fn=hed, inputs=[input_image, resolution], outputs=[gallery])

    with gr.Row():
        gr.Markdown("## Pidi Edge")
    with gr.Row():
        with gr.Column():
            input_image = gr.Image(source='upload', type="numpy")
            resolution = gr.Slider(label="resolution", minimum=256, maximum=1024, value=512, step=64)
            run_button = gr.Button(label="Run")
        with gr.Column():
            gallery = gr.Gallery(label="Generated images", show_label=False).style(height="auto")
    run_button.click(fn=pidi, inputs=[input_image, resolution], outputs=[gallery])

    with gr.Row():
        gr.Markdown("## MLSD Edge")
    with gr.Row():
        with gr.Column():
            input_image = gr.Image(source='upload', type="numpy")
            value_threshold = gr.Slider(label="value_threshold", minimum=0.01, maximum=2.0, value=0.1, step=0.01)
            distance_threshold = gr.Slider(label="distance_threshold", minimum=0.01, maximum=20.0, value=0.1, step=0.01)
            # MLSD uses a lower default resolution (384) than most annotators.
            resolution = gr.Slider(label="resolution", minimum=256, maximum=1024, value=384, step=64)
            run_button = gr.Button(label="Run")
        with gr.Column():
            gallery = gr.Gallery(label="Generated images", show_label=False).style(height="auto")
    run_button.click(fn=mlsd, inputs=[input_image, resolution, value_threshold, distance_threshold], outputs=[gallery])

    with gr.Row():
        gr.Markdown("## MIDAS Depth")
    with gr.Row():
        with gr.Column():
            input_image = gr.Image(source='upload', type="numpy")
            resolution = gr.Slider(label="resolution", minimum=256, maximum=1024, value=384, step=64)
            run_button = gr.Button(label="Run")
        with gr.Column():
            gallery = gr.Gallery(label="Generated images", show_label=False).style(height="auto")
    run_button.click(fn=midas, inputs=[input_image, resolution], outputs=[gallery])


    with gr.Row():
        gr.Markdown("## Zoe Depth")
    with gr.Row():
        with gr.Column():
            input_image = gr.Image(source='upload', type="numpy")
            resolution = gr.Slider(label="resolution", minimum=256, maximum=1024, value=512, step=64)
            run_button = gr.Button(label="Run")
        with gr.Column():
            gallery = gr.Gallery(label="Generated images", show_label=False).style(height="auto")
    run_button.click(fn=zoe, inputs=[input_image, resolution], outputs=[gallery])

    with gr.Row():
        gr.Markdown("## Normal Bae")
    with gr.Row():
        with gr.Column():
            input_image = gr.Image(source='upload', type="numpy")
            resolution = gr.Slider(label="resolution", minimum=256, maximum=1024, value=512, step=64)
            run_button = gr.Button(label="Run")
        with gr.Column():
            gallery = gr.Gallery(label="Generated images", show_label=False).style(height="auto")
    run_button.click(fn=normalbae, inputs=[input_image, resolution], outputs=[gallery])

    with gr.Row():
        gr.Markdown("## Openpose")
    with gr.Row():
        with gr.Column():
            input_image = gr.Image(source='upload', type="numpy")
            hand_and_face = gr.Checkbox(label='Hand and Face', value=False)
            resolution = gr.Slider(label="resolution", minimum=256, maximum=1024, value=512, step=64)
            run_button = gr.Button(label="Run")
        with gr.Column():
            gallery = gr.Gallery(label="Generated images", show_label=False).style(height="auto")
    run_button.click(fn=openpose, inputs=[input_image, resolution, hand_and_face], outputs=[gallery])

    with gr.Row():
        gr.Markdown("## Lineart Anime")
    with gr.Row():
        with gr.Column():
            input_image = gr.Image(source='upload', type="numpy")
            resolution = gr.Slider(label="resolution", minimum=256, maximum=1024, value=512, step=64)
            run_button = gr.Button(label="Run")
        with gr.Column():
            gallery = gr.Gallery(label="Generated images", show_label=False).style(height="auto")
    run_button.click(fn=lineart_anime, inputs=[input_image, resolution], outputs=[gallery])

    with gr.Row():
        gr.Markdown("## Lineart")
    with gr.Row():
        with gr.Column():
            input_image = gr.Image(source='upload', type="numpy")
            coarse = gr.Checkbox(label='Using coarse model', value=False)
            resolution = gr.Slider(label="resolution", minimum=256, maximum=1024, value=512, step=64)
            run_button = gr.Button(label="Run")
        with gr.Column():
            gallery = gr.Gallery(label="Generated images", show_label=False).style(height="auto")
    run_button.click(fn=lineart, inputs=[input_image, resolution, coarse], outputs=[gallery])

    with gr.Row():
        gr.Markdown("## Uniformer Segmentation")
    with gr.Row():
        with gr.Column():
            input_image = gr.Image(source='upload', type="numpy")
            resolution = gr.Slider(label="resolution", minimum=256, maximum=1024, value=512, step=64)
            run_button = gr.Button(label="Run")
        with gr.Column():
            gallery = gr.Gallery(label="Generated images", show_label=False).style(height="auto")
    run_button.click(fn=uniformer, inputs=[input_image, resolution], outputs=[gallery])

    with gr.Row():
        gr.Markdown("## Oneformer COCO Segmentation")
    with gr.Row():
        with gr.Column():
            input_image = gr.Image(source='upload', type="numpy")
            resolution = gr.Slider(label="resolution", minimum=256, maximum=1024, value=512, step=64)
            run_button = gr.Button(label="Run")
        with gr.Column():
            gallery = gr.Gallery(label="Generated images", show_label=False).style(height="auto")
    run_button.click(fn=oneformer_coco, inputs=[input_image, resolution], outputs=[gallery])

    with gr.Row():
        gr.Markdown("## Oneformer ADE20K Segmentation")
    with gr.Row():
        with gr.Column():
            input_image = gr.Image(source='upload', type="numpy")
            # ADE20K defaults to a higher working resolution (640).
            resolution = gr.Slider(label="resolution", minimum=256, maximum=1024, value=640, step=64)
            run_button = gr.Button(label="Run")
        with gr.Column():
            gallery = gr.Gallery(label="Generated images", show_label=False).style(height="auto")
    run_button.click(fn=oneformer_ade20k, inputs=[input_image, resolution], outputs=[gallery])

    with gr.Row():
        gr.Markdown("## Content Shuffle")
    with gr.Row():
        with gr.Column():
            input_image = gr.Image(source='upload', type="numpy")
            resolution = gr.Slider(label="resolution", minimum=256, maximum=1024, value=512, step=64)
            run_button = gr.Button(label="Run")
        with gr.Column():
            gallery = gr.Gallery(label="Generated images", show_label=False).style(height="auto")
    run_button.click(fn=content_shuffler, inputs=[input_image, resolution], outputs=[gallery])

    with gr.Row():
        gr.Markdown("## Color Shuffle")
    with gr.Row():
        with gr.Column():
            input_image = gr.Image(source='upload', type="numpy")
            resolution = gr.Slider(label="resolution", minimum=256, maximum=1024, value=512, step=64)
            run_button = gr.Button(label="Run")
        with gr.Column():
            gallery = gr.Gallery(label="Generated images", show_label=False).style(height="auto")
    run_button.click(fn=color_shuffler, inputs=[input_image, resolution], outputs=[gallery])


# Serve on all interfaces so the demo is reachable from outside the container.
block.launch(server_name='0.0.0.0')
CCEdit-main/src/controlnet11/gradio_inpaint.py ADDED
@@ -0,0 +1,122 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from share import *
2
+ import config
3
+
4
+ import cv2
5
+ import einops
6
+ import gradio as gr
7
+ import numpy as np
8
+ import torch
9
+ import random
10
+
11
+ from pytorch_lightning import seed_everything
12
+ from annotator.util import resize_image, HWC3
13
+ from cldm.model import create_model, load_state_dict
14
+ from cldm.ddim_hacked import DDIMSampler
15
+
16
+
17
# Build the ControlLDM on CPU from its YAML config, then layer weights on top.
model_name = 'control_v11p_sd15_inpaint'
model = create_model(f'./models/{model_name}.yaml').cpu()
# Base SD 1.5 weights first, then the inpaint ControlNet weights; strict=False
# because each checkpoint covers only a subset of the combined state dict.
model.load_state_dict(load_state_dict('./models/v1-5-pruned.ckpt', location='cuda'), strict=False)
model.load_state_dict(load_state_dict(f'./models/{model_name}.pth', location='cuda'), strict=False)
model = model.cuda()
ddim_sampler = DDIMSampler(model)
23
+
24
+
25
def process(input_image_and_mask, prompt, a_prompt, n_prompt, num_samples, image_resolution, ddim_steps, guess_mode, strength, scale, seed, eta, mask_blur):
    """Inpaint the user-sketched region of an image with ControlNet-guided DDIM.

    Args:
        input_image_and_mask: gradio sketch dict with 'image' (HWC uint8) and
            'mask' (HWC uint8, painted region > 0).
        prompt / a_prompt / n_prompt: positive, added-positive and negative text.
        num_samples: batch size of generated variants.
        image_resolution: short-side target for `resize_image`.
        ddim_steps, guess_mode, strength, scale, seed, eta: sampler controls.
        mask_blur: Gaussian sigma used to feather the pixel mask.

    Returns:
        [detected_map] + num_samples inpainted images, all HWC uint8 arrays.
    """
    with torch.no_grad():
        input_image = HWC3(input_image_and_mask['image'])
        input_mask = input_image_and_mask['mask']

        img_raw = resize_image(input_image, image_resolution).astype(np.float32)
        H, W, C = img_raw.shape

        # Pixel-space mask in [0, 1], feathered so the inpaint seam blends.
        mask_pixel = cv2.resize(input_mask[:, :, 0], (W, H), interpolation=cv2.INTER_LINEAR).astype(np.float32) / 255.0
        mask_pixel = cv2.GaussianBlur(mask_pixel, (0, 0), mask_blur)

        # Latent-space mask: the VAE downsamples by a factor of 8.
        mask_latent = cv2.resize(mask_pixel, (W // 8, H // 8), interpolation=cv2.INTER_AREA)

        # The inpaint ControlNet marks masked pixels with the deliberately
        # out-of-range value -255 so the hint encoder can recognize them.
        detected_map = img_raw.copy()
        detected_map[mask_pixel > 0.5] = -255.0

        control = torch.from_numpy(detected_map.copy()).float().cuda() / 255.0
        control = torch.stack([control for _ in range(num_samples)], dim=0)
        control = einops.rearrange(control, 'b h w c -> b c h w').clone()

        # DDIM re-injects x0 where mask == 1, so invert: keep (1) outside the
        # painted region, generate (0) inside it.
        mask = 1.0 - torch.from_numpy(mask_latent.copy()).float().cuda()
        mask = torch.stack([mask for _ in range(num_samples)], dim=0)
        mask = einops.rearrange(mask, 'b h w -> b 1 h w').clone()

        # BUGFIX: normalize with 127.5 so the encode matches the 127.5-based
        # decode below; the previous `/ 127.0` left a slight brightness drift
        # in the preserved (re-injected) latents.
        x0 = torch.from_numpy(img_raw.copy()).float().cuda() / 127.5 - 1.0
        x0 = torch.stack([x0 for _ in range(num_samples)], dim=0)
        x0 = einops.rearrange(x0, 'b h w c -> b c h w').clone()

        # Kept for the final pixel-space composite of result over original.
        mask_pixel_batched = mask_pixel[None, :, :, None]
        img_pixel_batched = img_raw.copy()[None]

        if seed == -1:
            seed = random.randint(0, 65535)
        seed_everything(seed)

        if config.save_memory:
            model.low_vram_shift(is_diffusing=False)

        cond = {"c_concat": [control], "c_crossattn": [model.get_learned_conditioning([prompt + ', ' + a_prompt] * num_samples)]}
        un_cond = {"c_concat": None if guess_mode else [control], "c_crossattn": [model.get_learned_conditioning([n_prompt] * num_samples)]}
        shape = (4, H // 8, W // 8)

        # Still is_diffusing=False here: the first-stage encoder is needed
        # for the x0 encoding below before diffusion starts.
        if config.save_memory:
            model.low_vram_shift(is_diffusing=False)

        ddim_sampler.make_schedule(ddim_steps, ddim_eta=eta, verbose=True)
        x0 = model.get_first_stage_encoding(model.encode_first_stage(x0))

        if config.save_memory:
            model.low_vram_shift(is_diffusing=True)

        model.control_scales = [strength * (0.825 ** float(12 - i)) for i in range(13)] if guess_mode else ([strength] * 13)
        # Magic number. IDK why. Perhaps because 0.825**12<0.01 but 0.826**12>0.01

        samples, intermediates = ddim_sampler.sample(ddim_steps, num_samples,
                                                     shape, cond, verbose=False, eta=eta,
                                                     unconditional_guidance_scale=scale,
                                                     unconditional_conditioning=un_cond, x0=x0, mask=mask)

        if config.save_memory:
            model.low_vram_shift(is_diffusing=False)

        x_samples = model.decode_first_stage(samples)
        x_samples = (einops.rearrange(x_samples, 'b c h w -> b h w c') * 127.5 + 127.5).cpu().numpy().astype(np.float32)
        # Pixel-space composite: generated pixels only inside the blurred mask.
        x_samples = x_samples * mask_pixel_batched + img_pixel_batched * (1.0 - mask_pixel_batched)

        results = [x_samples[i].clip(0, 255).astype(np.uint8) for i in range(num_samples)]
        return [detected_map.clip(0, 255).astype(np.uint8)] + results
93
+
94
+
95
# Gradio UI: image + painted-mask input on the left, output gallery on the right.
block = gr.Blocks().queue()
with block:
    with gr.Row():
        gr.Markdown("## Control Stable Diffusion with Inpaint Mask")
    with gr.Row():
        with gr.Column():
            # tool="sketch" lets the user paint the inpaint mask directly.
            input_image = gr.Image(source='upload', type="numpy", tool="sketch")
            prompt = gr.Textbox(label="Prompt")
            run_button = gr.Button(label="Run")
            num_samples = gr.Slider(label="Images", minimum=1, maximum=12, value=1, step=1)
            seed = gr.Slider(label="Seed", minimum=-1, maximum=2147483647, step=1, value=12345)
            mask_blur = gr.Slider(label="Mask Blur", minimum=0.1, maximum=7.0, value=5.0, step=0.01)
            with gr.Accordion("Advanced options", open=False):
                image_resolution = gr.Slider(label="Image Resolution", minimum=256, maximum=768, value=512, step=64)
                strength = gr.Slider(label="Control Strength", minimum=0.0, maximum=2.0, value=1.0, step=0.01)
                guess_mode = gr.Checkbox(label='Guess Mode', value=False)
                ddim_steps = gr.Slider(label="Steps", minimum=1, maximum=100, value=20, step=1)
                scale = gr.Slider(label="Guidance Scale", minimum=0.1, maximum=30.0, value=9.0, step=0.1)
                eta = gr.Slider(label="DDIM ETA", minimum=0.0, maximum=1.0, value=1.0, step=0.01)
                a_prompt = gr.Textbox(label="Added Prompt", value='best quality')
                n_prompt = gr.Textbox(label="Negative Prompt", value='lowres, bad anatomy, bad hands, cropped, worst quality')
        with gr.Column():
            result_gallery = gr.Gallery(label='Output', show_label=False, elem_id="gallery").style(grid=2, height='auto')
    # Argument order must match the process() signature exactly.
    ips = [input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, ddim_steps, guess_mode, strength, scale, seed, eta, mask_blur]
    run_button.click(fn=process, inputs=ips, outputs=[result_gallery])


block.launch(server_name='0.0.0.0')
CCEdit-main/src/controlnet11/gradio_ip2p.py ADDED
@@ -0,0 +1,99 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from share import *
2
+ import config
3
+
4
+ import cv2
5
+ import einops
6
+ import gradio as gr
7
+ import numpy as np
8
+ import torch
9
+ import random
10
+
11
+ from pytorch_lightning import seed_everything
12
+ from annotator.util import resize_image, HWC3
13
+ from cldm.model import create_model, load_state_dict
14
+ from cldm.ddim_hacked import DDIMSampler
15
+
16
+
17
# Build the ControlLDM on CPU from its YAML config, then layer weights on top.
model_name = 'control_v11e_sd15_ip2p'
model = create_model(f'./models/{model_name}.yaml').cpu()
# Base SD 1.5 weights first, then the ip2p ControlNet weights; strict=False
# because each checkpoint covers only a subset of the combined state dict.
model.load_state_dict(load_state_dict('./models/v1-5-pruned.ckpt', location='cuda'), strict=False)
model.load_state_dict(load_state_dict(f'./models/{model_name}.pth', location='cuda'), strict=False)
model = model.cuda()
ddim_sampler = DDIMSampler(model)
23
+
24
+
25
def process(input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, ddim_steps, guess_mode, strength, scale, seed, eta):
    """InstructPix2Pix-style sampling: the raw input image itself is the control.

    Returns [detected_map] + num_samples generated images (HWC uint8).
    """
    with torch.no_grad():
        image = HWC3(input_image)
        detected_map = image.copy()

        resized = resize_image(image, image_resolution)
        H, W, C = resized.shape

        detected_map = cv2.resize(detected_map, (W, H), interpolation=cv2.INTER_LINEAR)

        # Control tensor: normalized hint replicated across the batch, NCHW.
        hint = torch.from_numpy(detected_map.copy()).float().cuda() / 255.0
        control = torch.stack([hint] * num_samples, dim=0)
        control = einops.rearrange(control, 'b h w c -> b c h w').clone()

        if seed == -1:
            seed = random.randint(0, 65535)
        seed_everything(seed)

        if config.save_memory:
            model.low_vram_shift(is_diffusing=False)

        positive = [prompt + ', ' + a_prompt] * num_samples
        negative = [n_prompt] * num_samples
        cond = {"c_concat": [control], "c_crossattn": [model.get_learned_conditioning(positive)]}
        # Guess mode drops the hint from the unconditional branch.
        un_cond = {"c_concat": None if guess_mode else [control], "c_crossattn": [model.get_learned_conditioning(negative)]}
        shape = (4, H // 8, W // 8)

        if config.save_memory:
            model.low_vram_shift(is_diffusing=True)

        if guess_mode:
            model.control_scales = [strength * (0.825 ** float(12 - i)) for i in range(13)]
        else:
            model.control_scales = [strength] * 13
        # Magic number. IDK why. Perhaps because 0.825**12<0.01 but 0.826**12>0.01

        samples, intermediates = ddim_sampler.sample(ddim_steps, num_samples,
                                                     shape, cond, verbose=False, eta=eta,
                                                     unconditional_guidance_scale=scale,
                                                     unconditional_conditioning=un_cond)

        if config.save_memory:
            model.low_vram_shift(is_diffusing=False)

        decoded = model.decode_first_stage(samples)
        decoded = einops.rearrange(decoded, 'b c h w -> b h w c') * 127.5 + 127.5
        x_samples = decoded.cpu().numpy().clip(0, 255).astype(np.uint8)

        results = [x_samples[i] for i in range(num_samples)]
        return [detected_map] + results
70
+
71
+
72
# Gradio UI: input image and prompt on the left, output gallery on the right.
block = gr.Blocks().queue()
with block:
    with gr.Row():
        gr.Markdown("## Control Stable Diffusion with Instruct Pix2Pix")
    with gr.Row():
        with gr.Column():
            input_image = gr.Image(source='upload', type="numpy")
            prompt = gr.Textbox(label="Prompt")
            run_button = gr.Button(label="Run")
            num_samples = gr.Slider(label="Images", minimum=1, maximum=12, value=1, step=1)
            seed = gr.Slider(label="Seed", minimum=-1, maximum=2147483647, step=1, value=12345)
            # ip2p takes the raw image as the hint, so the only choice is "None";
            # the radio is kept for visual consistency with the other demos.
            det = gr.Radio(choices=["None"], type="value", value="None", label="Preprocessor")
            with gr.Accordion("Advanced options", open=False):
                image_resolution = gr.Slider(label="Image Resolution", minimum=256, maximum=768, value=512, step=64)
                strength = gr.Slider(label="Control Strength", minimum=0.0, maximum=2.0, value=1.0, step=0.01)
                guess_mode = gr.Checkbox(label='Guess Mode', value=False)
                ddim_steps = gr.Slider(label="Steps", minimum=1, maximum=100, value=20, step=1)
                scale = gr.Slider(label="Guidance Scale", minimum=0.1, maximum=30.0, value=9.0, step=0.1)
                eta = gr.Slider(label="DDIM ETA", minimum=0.0, maximum=1.0, value=1.0, step=0.01)
                a_prompt = gr.Textbox(label="Added Prompt", value='best quality')
                n_prompt = gr.Textbox(label="Negative Prompt", value='lowres, bad anatomy, bad hands, cropped, worst quality')
        with gr.Column():
            result_gallery = gr.Gallery(label='Output', show_label=False, elem_id="gallery").style(grid=2, height='auto')
    # Argument order must match the process() signature exactly (det is unused).
    ips = [input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, ddim_steps, guess_mode, strength, scale, seed, eta]
    run_button.click(fn=process, inputs=ips, outputs=[result_gallery])


block.launch(server_name='0.0.0.0')
CCEdit-main/src/controlnet11/gradio_lineart.py ADDED
@@ -0,0 +1,112 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from share import *
2
+ import config
3
+
4
+ import cv2
5
+ import einops
6
+ import gradio as gr
7
+ import numpy as np
8
+ import torch
9
+ import random
10
+
11
+ from pytorch_lightning import seed_everything
12
+ from annotator.util import resize_image, HWC3
13
+ from annotator.lineart import LineartDetector
14
+ from cldm.model import create_model, load_state_dict
15
+ from cldm.ddim_hacked import DDIMSampler
16
+
17
+
18
# Lazily-instantiated annotator; created on first use inside process().
preprocessor = None

# Build the ControlLDM on CPU from its YAML config, then layer weights on top.
model_name = 'control_v11p_sd15_lineart'
model = create_model(f'./models/{model_name}.yaml').cpu()
# Base SD 1.5 weights first, then the lineart ControlNet weights; strict=False
# because each checkpoint covers only a subset of the combined state dict.
model.load_state_dict(load_state_dict('./models/v1-5-pruned.ckpt', location='cuda'), strict=False)
model.load_state_dict(load_state_dict(f'./models/{model_name}.pth', location='cuda'), strict=False)
model = model.cuda()
ddim_sampler = DDIMSampler(model)
26
+
27
+
28
def process(det, input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, detect_resolution, ddim_steps, guess_mode, strength, scale, seed, eta):
    """Generate images conditioned on a line-art map extracted from input_image.

    det selects the annotator ("Lineart", "Lineart_Coarse", or "None" to use
    the raw image). Returns [detected_map] + num_samples results (HWC uint8).
    """
    global preprocessor
    # Lazily build the detector the first time a Lineart mode is requested.
    if 'Lineart' in det:
        if not isinstance(preprocessor, LineartDetector):
            preprocessor = LineartDetector()

    with torch.no_grad():
        input_image = HWC3(input_image)

        if det == 'None':
            detected_map = input_image.copy()
        else:
            # 'Coarse' in det toggles the coarse variant of the detector.
            detected_map = preprocessor(resize_image(input_image, detect_resolution), coarse='Coarse' in det)
            detected_map = HWC3(detected_map)

        img = resize_image(input_image, image_resolution)
        H, W, C = img.shape

        detected_map = cv2.resize(detected_map, (W, H), interpolation=cv2.INTER_LINEAR)

        # Lineart hint is inverted: black strokes become high control values.
        control = 1.0 - torch.from_numpy(detected_map.copy()).float().cuda() / 255.0
        control = torch.stack([control for _ in range(num_samples)], dim=0)
        control = einops.rearrange(control, 'b h w c -> b c h w').clone()

        if seed == -1:
            seed = random.randint(0, 65535)
        seed_everything(seed)

        if config.save_memory:
            model.low_vram_shift(is_diffusing=False)

        cond = {"c_concat": [control], "c_crossattn": [model.get_learned_conditioning([prompt + ', ' + a_prompt] * num_samples)]}
        # Guess mode drops the hint from the unconditional branch.
        un_cond = {"c_concat": None if guess_mode else [control], "c_crossattn": [model.get_learned_conditioning([n_prompt] * num_samples)]}
        shape = (4, H // 8, W // 8)

        if config.save_memory:
            model.low_vram_shift(is_diffusing=True)

        model.control_scales = [strength * (0.825 ** float(12 - i)) for i in range(13)] if guess_mode else ([strength] * 13)
        # Magic number. IDK why. Perhaps because 0.825**12<0.01 but 0.826**12>0.01

        samples, intermediates = ddim_sampler.sample(ddim_steps, num_samples,
                                                     shape, cond, verbose=False, eta=eta,
                                                     unconditional_guidance_scale=scale,
                                                     unconditional_conditioning=un_cond)

        if config.save_memory:
            model.low_vram_shift(is_diffusing=False)

        x_samples = model.decode_first_stage(samples)
        x_samples = (einops.rearrange(x_samples, 'b c h w -> b h w c') * 127.5 + 127.5).cpu().numpy().clip(0, 255).astype(np.uint8)

        results = [x_samples[i] for i in range(num_samples)]
        return [detected_map] + results
82
+
83
+
84
# Gradio UI: input image and prompt on the left, output gallery on the right.
block = gr.Blocks().queue()
with block:
    with gr.Row():
        gr.Markdown("## Control Stable Diffusion with Lineart")
    with gr.Row():
        with gr.Column():
            input_image = gr.Image(source='upload', type="numpy")
            prompt = gr.Textbox(label="Prompt")
            run_button = gr.Button(label="Run")
            num_samples = gr.Slider(label="Images", minimum=1, maximum=12, value=1, step=1)
            seed = gr.Slider(label="Seed", minimum=-1, maximum=2147483647, step=1, value=12345)
            det = gr.Radio(choices=["Lineart", "Lineart_Coarse", "None"], type="value", value="Lineart", label="Preprocessor")
            with gr.Accordion("Advanced options", open=False):
                image_resolution = gr.Slider(label="Image Resolution", minimum=256, maximum=768, value=512, step=64)
                strength = gr.Slider(label="Control Strength", minimum=0.0, maximum=2.0, value=1.0, step=0.01)
                guess_mode = gr.Checkbox(label='Guess Mode', value=False)
                detect_resolution = gr.Slider(label="Preprocessor Resolution", minimum=128, maximum=1024, value=512, step=1)
                ddim_steps = gr.Slider(label="Steps", minimum=1, maximum=100, value=20, step=1)
                scale = gr.Slider(label="Guidance Scale", minimum=0.1, maximum=30.0, value=9.0, step=0.1)
                eta = gr.Slider(label="DDIM ETA", minimum=0.0, maximum=1.0, value=1.0, step=0.01)
                a_prompt = gr.Textbox(label="Added Prompt", value='best quality')
                n_prompt = gr.Textbox(label="Negative Prompt", value='lowres, bad anatomy, bad hands, cropped, worst quality')
        with gr.Column():
            result_gallery = gr.Gallery(label='Output', show_label=False, elem_id="gallery").style(grid=2, height='auto')
    # Argument order must match the process() signature exactly.
    ips = [det, input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, detect_resolution, ddim_steps, guess_mode, strength, scale, seed, eta]
    run_button.click(fn=process, inputs=ips, outputs=[result_gallery])


block.launch(server_name='0.0.0.0')
CCEdit-main/src/controlnet11/gradio_mlsd.py ADDED
@@ -0,0 +1,115 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from share import *
2
+ import config
3
+
4
+ import cv2
5
+ import einops
6
+ import gradio as gr
7
+ import numpy as np
8
+ import torch
9
+ import random
10
+
11
+ from pytorch_lightning import seed_everything
12
+ from annotator.util import resize_image, HWC3
13
+ from annotator.mlsd import MLSDdetector
14
+ from cldm.model import create_model, load_state_dict
15
+ from cldm.ddim_hacked import DDIMSampler
16
+
17
+
18
# Lazily-instantiated annotator; created on first use inside process().
preprocessor = None

# Build the ControlLDM on CPU from its YAML config, then layer weights on top.
model_name = 'control_v11p_sd15_mlsd'
model = create_model(f'./models/{model_name}.yaml').cpu()
# Base SD 1.5 weights first, then the MLSD ControlNet weights; strict=False
# because each checkpoint covers only a subset of the combined state dict.
model.load_state_dict(load_state_dict('./models/v1-5-pruned.ckpt', location='cuda'), strict=False)
model.load_state_dict(load_state_dict(f'./models/{model_name}.pth', location='cuda'), strict=False)
model = model.cuda()
ddim_sampler = DDIMSampler(model)
26
+
27
+
28
def process(det, input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, detect_resolution, ddim_steps, guess_mode, strength, scale, seed, eta, value_threshold, distance_threshold):
    """Generate images conditioned on MLSD straight-line detection of input_image.

    det is "MLSD" or "None" (raw image as hint). value_threshold and
    distance_threshold are passed through to the MLSD detector.
    Returns [detected_map] + num_samples results (HWC uint8).
    """
    global preprocessor

    # Lazily build the detector the first time MLSD mode is requested.
    if det == 'MLSD':
        if not isinstance(preprocessor, MLSDdetector):
            preprocessor = MLSDdetector()

    with torch.no_grad():
        input_image = HWC3(input_image)

        if det == 'None':
            detected_map = input_image.copy()
        else:
            detected_map = preprocessor(resize_image(input_image, detect_resolution), value_threshold, distance_threshold)
            detected_map = HWC3(detected_map)

        img = resize_image(input_image, image_resolution)
        H, W, C = img.shape

        detected_map = cv2.resize(detected_map, (W, H), interpolation=cv2.INTER_LINEAR)

        control = torch.from_numpy(detected_map.copy()).float().cuda() / 255.0
        control = torch.stack([control for _ in range(num_samples)], dim=0)
        control = einops.rearrange(control, 'b h w c -> b c h w').clone()

        if seed == -1:
            seed = random.randint(0, 65535)
        seed_everything(seed)

        if config.save_memory:
            model.low_vram_shift(is_diffusing=False)

        cond = {"c_concat": [control], "c_crossattn": [model.get_learned_conditioning([prompt + ', ' + a_prompt] * num_samples)]}
        # Guess mode drops the hint from the unconditional branch.
        un_cond = {"c_concat": None if guess_mode else [control], "c_crossattn": [model.get_learned_conditioning([n_prompt] * num_samples)]}
        shape = (4, H // 8, W // 8)

        if config.save_memory:
            model.low_vram_shift(is_diffusing=True)

        model.control_scales = [strength * (0.825 ** float(12 - i)) for i in range(13)] if guess_mode else ([strength] * 13)
        # Magic number. IDK why. Perhaps because 0.825**12<0.01 but 0.826**12>0.01

        samples, intermediates = ddim_sampler.sample(ddim_steps, num_samples,
                                                     shape, cond, verbose=False, eta=eta,
                                                     unconditional_guidance_scale=scale,
                                                     unconditional_conditioning=un_cond)

        if config.save_memory:
            model.low_vram_shift(is_diffusing=False)

        x_samples = model.decode_first_stage(samples)
        x_samples = (einops.rearrange(x_samples, 'b c h w -> b h w c') * 127.5 + 127.5).cpu().numpy().clip(0, 255).astype(np.uint8)

        results = [x_samples[i] for i in range(num_samples)]
        return [detected_map] + results
83
+
84
+
85
# Gradio UI: input image and prompt on the left, output gallery on the right.
block = gr.Blocks().queue()
with block:
    with gr.Row():
        gr.Markdown("## Control Stable Diffusion with MLSD Lines")
    with gr.Row():
        with gr.Column():
            input_image = gr.Image(source='upload', type="numpy")
            prompt = gr.Textbox(label="Prompt")
            run_button = gr.Button(label="Run")
            num_samples = gr.Slider(label="Images", minimum=1, maximum=12, value=1, step=1)
            seed = gr.Slider(label="Seed", minimum=-1, maximum=2147483647, step=1, value=12345)
            det = gr.Radio(choices=["MLSD", "None"], type="value", value="MLSD", label="Preprocessor")
            with gr.Accordion("Advanced options", open=False):
                # MLSD-specific Hough thresholds, forwarded to the detector.
                value_threshold = gr.Slider(label="Hough value threshold (MLSD)", minimum=0.01, maximum=2.0, value=0.1, step=0.01)
                distance_threshold = gr.Slider(label="Hough distance threshold (MLSD)", minimum=0.01, maximum=20.0, value=0.1, step=0.01)
                image_resolution = gr.Slider(label="Image Resolution", minimum=256, maximum=768, value=512, step=64)
                strength = gr.Slider(label="Control Strength", minimum=0.0, maximum=2.0, value=1.0, step=0.01)
                guess_mode = gr.Checkbox(label='Guess Mode', value=False)
                detect_resolution = gr.Slider(label="Preprocessor Resolution", minimum=128, maximum=1024, value=512, step=1)
                ddim_steps = gr.Slider(label="Steps", minimum=1, maximum=100, value=20, step=1)
                scale = gr.Slider(label="Guidance Scale", minimum=0.1, maximum=30.0, value=9.0, step=0.1)
                eta = gr.Slider(label="DDIM ETA", minimum=0.0, maximum=1.0, value=1.0, step=0.01)
                a_prompt = gr.Textbox(label="Added Prompt", value='best quality')
                n_prompt = gr.Textbox(label="Negative Prompt", value='lowres, bad anatomy, bad hands, cropped, worst quality')
        with gr.Column():
            result_gallery = gr.Gallery(label='Output', show_label=False, elem_id="gallery").style(grid=2, height='auto')
    # Argument order must match the process() signature exactly.
    ips = [det, input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, detect_resolution, ddim_steps, guess_mode, strength, scale, seed, eta, value_threshold, distance_threshold]
    run_button.click(fn=process, inputs=ips, outputs=[result_gallery])


block.launch(server_name='0.0.0.0')
CCEdit-main/src/controlnet11/gradio_seg.py ADDED
@@ -0,0 +1,120 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from share import *
2
+ import config
3
+
4
+ import cv2
5
+ import einops
6
+ import gradio as gr
7
+ import numpy as np
8
+ import torch
9
+ import random
10
+
11
+ from pytorch_lightning import seed_everything
12
+ from annotator.util import resize_image, HWC3
13
+ from annotator.uniformer import UniformerDetector
14
+ from annotator.oneformer import OneformerCOCODetector, OneformerADE20kDetector
15
+ from cldm.model import create_model, load_state_dict
16
+ from cldm.ddim_hacked import DDIMSampler
17
+
18
+
19
# Lazily-instantiated annotator; swapped on demand inside process().
preprocessor = None

# Build the ControlLDM on CPU from its YAML config, then layer weights on top.
model_name = 'control_v11p_sd15_seg'
model = create_model(f'./models/{model_name}.yaml').cpu()
# Base SD 1.5 weights first, then the segmentation ControlNet weights;
# strict=False because each checkpoint covers only part of the state dict.
model.load_state_dict(load_state_dict('./models/v1-5-pruned.ckpt', location='cuda'), strict=False)
model.load_state_dict(load_state_dict(f'./models/{model_name}.pth', location='cuda'), strict=False)
model = model.cuda()
ddim_sampler = DDIMSampler(model)
27
+
28
+
29
def process(det, input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, detect_resolution, ddim_steps, guess_mode, strength, scale, seed, eta):
    """Generate images conditioned on a semantic-segmentation map of input_image.

    det picks the segmenter: OneFormer-COCO, OneFormer-ADE20K, Uniformer-ADE20K,
    or "None" (raw image as hint). Returns [detected_map] + num_samples results.
    """
    global preprocessor

    # Swap the global annotator only when the requested type differs from the
    # one currently cached, to avoid reloading heavy segmentation models.
    if det == 'Seg_OFCOCO':
        if not isinstance(preprocessor, OneformerCOCODetector):
            preprocessor = OneformerCOCODetector()
    if det == 'Seg_OFADE20K':
        if not isinstance(preprocessor, OneformerADE20kDetector):
            preprocessor = OneformerADE20kDetector()
    if det == 'Seg_UFADE20K':
        if not isinstance(preprocessor, UniformerDetector):
            preprocessor = UniformerDetector()

    with torch.no_grad():
        input_image = HWC3(input_image)

        if det == 'None':
            detected_map = input_image.copy()
        else:
            detected_map = preprocessor(resize_image(input_image, detect_resolution))
            detected_map = HWC3(detected_map)

        img = resize_image(input_image, image_resolution)
        H, W, C = img.shape

        detected_map = cv2.resize(detected_map, (W, H), interpolation=cv2.INTER_LINEAR)

        control = torch.from_numpy(detected_map.copy()).float().cuda() / 255.0
        control = torch.stack([control for _ in range(num_samples)], dim=0)
        control = einops.rearrange(control, 'b h w c -> b c h w').clone()

        if seed == -1:
            seed = random.randint(0, 65535)
        seed_everything(seed)

        if config.save_memory:
            model.low_vram_shift(is_diffusing=False)

        cond = {"c_concat": [control], "c_crossattn": [model.get_learned_conditioning([prompt + ', ' + a_prompt] * num_samples)]}
        # Guess mode drops the hint from the unconditional branch.
        un_cond = {"c_concat": None if guess_mode else [control], "c_crossattn": [model.get_learned_conditioning([n_prompt] * num_samples)]}
        shape = (4, H // 8, W // 8)

        if config.save_memory:
            model.low_vram_shift(is_diffusing=True)

        model.control_scales = [strength * (0.825 ** float(12 - i)) for i in range(13)] if guess_mode else ([strength] * 13)
        # Magic number. IDK why. Perhaps because 0.825**12<0.01 but 0.826**12>0.01

        samples, intermediates = ddim_sampler.sample(ddim_steps, num_samples,
                                                     shape, cond, verbose=False, eta=eta,
                                                     unconditional_guidance_scale=scale,
                                                     unconditional_conditioning=un_cond)

        if config.save_memory:
            model.low_vram_shift(is_diffusing=False)

        x_samples = model.decode_first_stage(samples)
        x_samples = (einops.rearrange(x_samples, 'b c h w -> b h w c') * 127.5 + 127.5).cpu().numpy().clip(0, 255).astype(np.uint8)

        results = [x_samples[i] for i in range(num_samples)]
        return [detected_map] + results
90
+
91
+
92
# Gradio UI: input image and prompt on the left, output gallery on the right.
block = gr.Blocks().queue()
with block:
    with gr.Row():
        gr.Markdown("## Control Stable Diffusion with Semantic Segmentation")
    with gr.Row():
        with gr.Column():
            input_image = gr.Image(source='upload', type="numpy")
            prompt = gr.Textbox(label="Prompt")
            run_button = gr.Button(label="Run")
            num_samples = gr.Slider(label="Images", minimum=1, maximum=12, value=1, step=1)
            seed = gr.Slider(label="Seed", minimum=-1, maximum=2147483647, step=1, value=12345)
            det = gr.Radio(choices=["Seg_OFADE20K", "Seg_OFCOCO", "Seg_UFADE20K", "None"], type="value", value="Seg_OFADE20K", label="Preprocessor")
            with gr.Accordion("Advanced options", open=False):
                image_resolution = gr.Slider(label="Image Resolution", minimum=256, maximum=768, value=512, step=64)
                strength = gr.Slider(label="Control Strength", minimum=0.0, maximum=2.0, value=1.0, step=0.01)
                guess_mode = gr.Checkbox(label='Guess Mode', value=False)
                detect_resolution = gr.Slider(label="Preprocessor Resolution", minimum=128, maximum=1024, value=512, step=1)
                ddim_steps = gr.Slider(label="Steps", minimum=1, maximum=100, value=20, step=1)
                scale = gr.Slider(label="Guidance Scale", minimum=0.1, maximum=30.0, value=9.0, step=0.1)
                eta = gr.Slider(label="DDIM ETA", minimum=0.0, maximum=1.0, value=1.0, step=0.01)
                a_prompt = gr.Textbox(label="Added Prompt", value='best quality')
                n_prompt = gr.Textbox(label="Negative Prompt", value='lowres, bad anatomy, bad hands, cropped, worst quality')
        with gr.Column():
            result_gallery = gr.Gallery(label='Output', show_label=False, elem_id="gallery").style(grid=2, height='auto')
    # Argument order must match the process() signature exactly.
    ips = [det, input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, detect_resolution, ddim_steps, guess_mode, strength, scale, seed, eta]
    run_button.click(fn=process, inputs=ips, outputs=[result_gallery])


block.launch(server_name='0.0.0.0')
CCEdit-main/src/controlnet11/gradio_shuffle.py ADDED
@@ -0,0 +1,105 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from share import *
2
+ import config
3
+
4
+ import cv2
5
+ import einops
6
+ import gradio as gr
7
+ import numpy as np
8
+ import torch
9
+ import random
10
+
11
+ from pytorch_lightning import seed_everything
12
+ from annotator.util import resize_image, HWC3
13
+ from annotator.shuffle import ContentShuffleDetector
14
+ from cldm.model import create_model, load_state_dict
15
+ from cldm.ddim_hacked import DDIMSampler
16
+
17
+
18
# Build the ControlLDM on CPU from its YAML config, then layer weights on top.
model_name = 'control_v11e_sd15_shuffle'
model = create_model(f'./models/{model_name}.yaml').cpu()
# Base SD 1.5 weights first, then the shuffle ControlNet weights; strict=False
# because each checkpoint covers only a subset of the combined state dict.
model.load_state_dict(load_state_dict('./models/v1-5-pruned.ckpt', location='cuda'), strict=False)
model.load_state_dict(load_state_dict(f'./models/{model_name}.pth', location='cuda'), strict=False)
model = model.cuda()
ddim_sampler = DDIMSampler(model)

# Content-shuffle annotator is cheap, so it is built eagerly at import time.
preprocessor = ContentShuffleDetector()
26
+
27
+
28
def process(det, input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, ddim_steps, strength, scale, seed, eta):
    """Generate images conditioned on a content-shuffled version of input_image.

    det is "Shuffle" (apply the shuffle annotator) or "None" (resized image as
    hint). Returns results, prefixed with the shuffled map only in Shuffle mode.
    """
    with torch.no_grad():
        input_image = HWC3(input_image)
        detected_map = input_image.copy()
        img = resize_image(input_image, image_resolution)
        H, W, C = img.shape

        if det == "Shuffle":
            # Seed numpy so the shuffle pattern is reproducible for a given seed.
            np.random.seed(seed)
            detected_map = preprocessor(detected_map, w=W, h=H, f=256)
        else:
            detected_map = img.copy()

        control = torch.from_numpy(detected_map.copy()).float().cuda() / 255.0
        control = torch.stack([control for _ in range(num_samples)], dim=0)
        control = einops.rearrange(control, 'b h w c -> b c h w').clone()

        if seed == -1:
            seed = random.randint(0, 65535)
        seed_everything(seed)

        if config.save_memory:
            model.low_vram_shift(is_diffusing=False)

        cond = {"c_concat": [control], "c_crossattn": [model.get_learned_conditioning([prompt + ', ' + a_prompt] * num_samples)]}
        # Unconditional branch never receives the hint for the shuffle model.
        un_cond = {"c_concat": None, "c_crossattn": [model.get_learned_conditioning([n_prompt] * num_samples)]}
        shape = (4, H // 8, W // 8)

        if config.save_memory:
            model.low_vram_shift(is_diffusing=True)

        model.control_scales = [strength] * 13
        samples, intermediates = ddim_sampler.sample(ddim_steps, num_samples,
                                                     shape, cond, verbose=False, eta=eta,
                                                     unconditional_guidance_scale=scale,
                                                     unconditional_conditioning=un_cond)

        if config.save_memory:
            model.low_vram_shift(is_diffusing=False)

        x_samples = model.decode_first_stage(samples)
        x_samples = (einops.rearrange(x_samples, 'b c h w -> b h w c') * 127.5 + 127.5).cpu().numpy().clip(0, 255).astype(np.uint8)

        results = [x_samples[i] for i in range(num_samples)]

        # Only show the shuffled hint when the annotator actually ran.
        if det == "Shuffle":
            return [detected_map] + results
        else:
            return results
77
+
78
+
79
# Gradio UI: input image and prompt on the left, output gallery on the right.
block = gr.Blocks().queue()
with block:
    with gr.Row():
        gr.Markdown("## Control Stable Diffusion with Content Shuffle")
    with gr.Row():
        with gr.Column():
            input_image = gr.Image(source='upload', type="numpy")
            prompt = gr.Textbox(label="Prompt")
            run_button = gr.Button(label="Run")
            num_samples = gr.Slider(label="Images", minimum=1, maximum=12, value=1, step=1)
            seed = gr.Slider(label="Seed", minimum=-1, maximum=2147483647, step=1, value=12345)
            det = gr.Radio(choices=["Shuffle", "None"], type="value", value="None", label="Preprocessor")
            with gr.Accordion("Advanced options", open=False):
                image_resolution = gr.Slider(label="Image Resolution", minimum=256, maximum=768, value=512, step=64)
                strength = gr.Slider(label="Control Strength", minimum=0.0, maximum=2.0, value=1.0, step=0.01)
                ddim_steps = gr.Slider(label="Steps", minimum=1, maximum=100, value=20, step=1)
                scale = gr.Slider(label="Guidance Scale", minimum=0.1, maximum=30.0, value=9.0, step=0.1)
                eta = gr.Slider(label="DDIM ETA", minimum=0.0, maximum=1.0, value=1.0, step=0.01)
                a_prompt = gr.Textbox(label="Added Prompt", value='best quality')
                n_prompt = gr.Textbox(label="Negative Prompt", value='lowres, bad anatomy, bad hands, cropped, worst quality')
        with gr.Column():
            result_gallery = gr.Gallery(label='Output', show_label=False, elem_id="gallery").style(grid=2, height='auto')
    # Argument order must match the process() signature exactly (no guess_mode
    # here — the shuffle model always drops the hint unconditionally).
    ips = [det, input_image, prompt, a_prompt, n_prompt, num_samples, image_resolution, ddim_steps, strength, scale, seed, eta]
    run_button.click(fn=process, inputs=ips, outputs=[result_gallery])


block.launch(server_name='0.0.0.0')
CCEdit-main/src/controlnet11/models/cldm_v15.yaml ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model:
2
+ target: cldm.cldm.ControlLDM
3
+ params:
4
+ linear_start: 0.00085
5
+ linear_end: 0.0120
6
+ num_timesteps_cond: 1
7
+ log_every_t: 200
8
+ timesteps: 1000
9
+ first_stage_key: "jpg"
10
+ cond_stage_key: "txt"
11
+ control_key: "hint"
12
+ image_size: 64
13
+ channels: 4
14
+ cond_stage_trainable: false
15
+ conditioning_key: crossattn
16
+ monitor: val/loss_simple_ema
17
+ scale_factor: 0.18215
18
+ use_ema: False
19
+ only_mid_control: False
20
+
21
+ control_stage_config:
22
+ target: cldm.cldm.ControlNet
23
+ params:
24
+ image_size: 32 # unused
25
+ in_channels: 4
26
+ hint_channels: 3
27
+ model_channels: 320
28
+ attention_resolutions: [ 4, 2, 1 ]
29
+ num_res_blocks: 2
30
+ channel_mult: [ 1, 2, 4, 4 ]
31
+ num_heads: 8
32
+ use_spatial_transformer: True
33
+ transformer_depth: 1
34
+ context_dim: 768
35
+ use_checkpoint: True
36
+ legacy: False
37
+
38
+ unet_config:
39
+ target: cldm.cldm.ControlledUnetModel
40
+ params:
41
+ image_size: 32 # unused
42
+ in_channels: 4
43
+ out_channels: 4
44
+ model_channels: 320
45
+ attention_resolutions: [ 4, 2, 1 ]
46
+ num_res_blocks: 2
47
+ channel_mult: [ 1, 2, 4, 4 ]
48
+ num_heads: 8
49
+ use_spatial_transformer: True
50
+ transformer_depth: 1
51
+ context_dim: 768
52
+ use_checkpoint: True
53
+ legacy: False
54
+
55
+ first_stage_config:
56
+ target: ldm.models.autoencoder.AutoencoderKL
57
+ params:
58
+ embed_dim: 4
59
+ monitor: val/rec_loss
60
+ ddconfig:
61
+ double_z: true
62
+ z_channels: 4
63
+ resolution: 256
64
+ in_channels: 3
65
+ out_ch: 3
66
+ ch: 128
67
+ ch_mult:
68
+ - 1
69
+ - 2
70
+ - 4
71
+ - 4
72
+ num_res_blocks: 2
73
+ attn_resolutions: []
74
+ dropout: 0.0
75
+ lossconfig:
76
+ target: torch.nn.Identity
77
+
78
+ cond_stage_config:
79
+ target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
CCEdit-main/src/controlnet11/models/cldm_v15_avg_pool.yaml ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model:
2
+ target: cldm.cldm.ControlLDM
3
+ params:
4
+ linear_start: 0.00085
5
+ linear_end: 0.0120
6
+ num_timesteps_cond: 1
7
+ log_every_t: 200
8
+ timesteps: 1000
9
+ first_stage_key: "jpg"
10
+ cond_stage_key: "txt"
11
+ control_key: "hint"
12
+ image_size: 64
13
+ channels: 4
14
+ cond_stage_trainable: false
15
+ conditioning_key: crossattn
16
+ monitor: val/loss_simple_ema
17
+ scale_factor: 0.18215
18
+ use_ema: False
19
+ only_mid_control: False
20
+ global_average_pooling: True
21
+
22
+ control_stage_config:
23
+ target: cldm.cldm.ControlNet
24
+ params:
25
+ image_size: 32 # unused
26
+ in_channels: 4
27
+ hint_channels: 3
28
+ model_channels: 320
29
+ attention_resolutions: [ 4, 2, 1 ]
30
+ num_res_blocks: 2
31
+ channel_mult: [ 1, 2, 4, 4 ]
32
+ num_heads: 8
33
+ use_spatial_transformer: True
34
+ transformer_depth: 1
35
+ context_dim: 768
36
+ use_checkpoint: True
37
+ legacy: False
38
+
39
+ unet_config:
40
+ target: cldm.cldm.ControlledUnetModel
41
+ params:
42
+ image_size: 32 # unused
43
+ in_channels: 4
44
+ out_channels: 4
45
+ model_channels: 320
46
+ attention_resolutions: [ 4, 2, 1 ]
47
+ num_res_blocks: 2
48
+ channel_mult: [ 1, 2, 4, 4 ]
49
+ num_heads: 8
50
+ use_spatial_transformer: True
51
+ transformer_depth: 1
52
+ context_dim: 768
53
+ use_checkpoint: True
54
+ legacy: False
55
+
56
+ first_stage_config:
57
+ target: ldm.models.autoencoder.AutoencoderKL
58
+ params:
59
+ embed_dim: 4
60
+ monitor: val/rec_loss
61
+ ddconfig:
62
+ double_z: true
63
+ z_channels: 4
64
+ resolution: 256
65
+ in_channels: 3
66
+ out_ch: 3
67
+ ch: 128
68
+ ch_mult:
69
+ - 1
70
+ - 2
71
+ - 4
72
+ - 4
73
+ num_res_blocks: 2
74
+ attn_resolutions: []
75
+ dropout: 0.0
76
+ lossconfig:
77
+ target: torch.nn.Identity
78
+
79
+ cond_stage_config:
80
+ target: ldm.modules.encoders.modules.FrozenCLIPEmbedder
CCEdit-main/src/controlnet11/models/cldm_v21.yaml ADDED
@@ -0,0 +1,85 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ model:
2
+ target: cldm.cldm.ControlLDM
3
+ params:
4
+ linear_start: 0.00085
5
+ linear_end: 0.0120
6
+ num_timesteps_cond: 1
7
+ log_every_t: 200
8
+ timesteps: 1000
9
+ first_stage_key: "jpg"
10
+ cond_stage_key: "txt"
11
+ control_key: "hint"
12
+ image_size: 64
13
+ channels: 4
14
+ cond_stage_trainable: false
15
+ conditioning_key: crossattn
16
+ monitor: val/loss_simple_ema
17
+ scale_factor: 0.18215
18
+ use_ema: False
19
+ only_mid_control: False
20
+
21
+ control_stage_config:
22
+ target: cldm.cldm.ControlNet
23
+ params:
24
+ use_checkpoint: True
25
+ image_size: 32 # unused
26
+ in_channels: 4
27
+ hint_channels: 3
28
+ model_channels: 320
29
+ attention_resolutions: [ 4, 2, 1 ]
30
+ num_res_blocks: 2
31
+ channel_mult: [ 1, 2, 4, 4 ]
32
+ num_head_channels: 64 # need to fix for flash-attn
33
+ use_spatial_transformer: True
34
+ use_linear_in_transformer: True
35
+ transformer_depth: 1
36
+ context_dim: 1024
37
+ legacy: False
38
+
39
+ unet_config:
40
+ target: cldm.cldm.ControlledUnetModel
41
+ params:
42
+ use_checkpoint: True
43
+ image_size: 32 # unused
44
+ in_channels: 4
45
+ out_channels: 4
46
+ model_channels: 320
47
+ attention_resolutions: [ 4, 2, 1 ]
48
+ num_res_blocks: 2
49
+ channel_mult: [ 1, 2, 4, 4 ]
50
+ num_head_channels: 64 # need to fix for flash-attn
51
+ use_spatial_transformer: True
52
+ use_linear_in_transformer: True
53
+ transformer_depth: 1
54
+ context_dim: 1024
55
+ legacy: False
56
+
57
+ first_stage_config:
58
+ target: ldm.models.autoencoder.AutoencoderKL
59
+ params:
60
+ embed_dim: 4
61
+ monitor: val/rec_loss
62
+ ddconfig:
63
+ #attn_type: "vanilla-xformers"
64
+ double_z: true
65
+ z_channels: 4
66
+ resolution: 256
67
+ in_channels: 3
68
+ out_ch: 3
69
+ ch: 128
70
+ ch_mult:
71
+ - 1
72
+ - 2
73
+ - 4
74
+ - 4
75
+ num_res_blocks: 2
76
+ attn_resolutions: []
77
+ dropout: 0.0
78
+ lossconfig:
79
+ target: torch.nn.Identity
80
+
81
+ cond_stage_config:
82
+ target: ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder
83
+ params:
84
+ freeze: True
85
+ layer: "penultimate"