otroivan MaxReimann commited on
Commit
5066197
·
0 Parent(s):

Duplicate from MaxReimann/Whitebox-Style-Transfer-Editing

Browse files

Co-authored-by: Max Reimann <MaxReimann@users.noreply.huggingface.co>

Files changed (48) hide show
  1. .gitattributes +37 -0
  2. .github/workflows/file_size_check.yml +16 -0
  3. .github/workflows/push_to_huggingface.yml +19 -0
  4. .gitmodules +4 -0
  5. README.md +47 -0
  6. Whitebox_style_transfer.py +312 -0
  7. demo_config.py +1 -0
  8. images/content/colibri.jpeg +3 -0
  9. images/content/portrait.jpeg +3 -0
  10. images/content/tubingen.jpeg +3 -0
  11. images/screen_wise_demo.jpg +3 -0
  12. images/style/candy.jpg +3 -0
  13. images/style/feathers.jpg +3 -0
  14. images/style/mosaic.jpg +3 -0
  15. images/style/starry_night.jpg +3 -0
  16. images/style/the_scream.jpg +3 -0
  17. images/style/udnie.jpg +3 -0
  18. images/style/wave.jpg +3 -0
  19. images/style/woman_with_hat.jpg +3 -0
  20. pages/1_🎨_Apply_preset.py +121 -0
  21. pages/2_🖌️_Local_edits.py +242 -0
  22. pages/3_📖_Readme.py +32 -0
  23. precomputed/minimal_pipeline/colibri/starry_night/input.png +3 -0
  24. precomputed/minimal_pipeline/colibri/starry_night/vp.pt +3 -0
  25. precomputed/minimal_pipeline/colibri/the_scream/input.png +3 -0
  26. precomputed/minimal_pipeline/colibri/the_scream/vp.pt +3 -0
  27. precomputed/minimal_pipeline/colibri/wave/input.png +3 -0
  28. precomputed/minimal_pipeline/colibri/wave/vp.pt +3 -0
  29. precomputed/minimal_pipeline/colibri/woman_with_hat/input.png +3 -0
  30. precomputed/minimal_pipeline/colibri/woman_with_hat/vp.pt +3 -0
  31. precomputed/minimal_pipeline/portrait/starry_night/input.png +3 -0
  32. precomputed/minimal_pipeline/portrait/starry_night/vp.pt +3 -0
  33. precomputed/minimal_pipeline/portrait/the_scream/input.png +3 -0
  34. precomputed/minimal_pipeline/portrait/the_scream/vp.pt +3 -0
  35. precomputed/minimal_pipeline/portrait/wave/input.png +3 -0
  36. precomputed/minimal_pipeline/portrait/wave/vp.pt +3 -0
  37. precomputed/minimal_pipeline/portrait/woman_with_hat/input.png +3 -0
  38. precomputed/minimal_pipeline/portrait/woman_with_hat/vp.pt +3 -0
  39. precomputed/minimal_pipeline/tubingen/starry_night/input.png +3 -0
  40. precomputed/minimal_pipeline/tubingen/starry_night/vp.pt +3 -0
  41. precomputed/minimal_pipeline/tubingen/the_scream/input.png +3 -0
  42. precomputed/minimal_pipeline/tubingen/the_scream/vp.pt +3 -0
  43. precomputed/minimal_pipeline/tubingen/wave/input.png +3 -0
  44. precomputed/minimal_pipeline/tubingen/wave/vp.pt +3 -0
  45. precomputed/minimal_pipeline/tubingen/woman_with_hat/input.png +3 -0
  46. precomputed/minimal_pipeline/tubingen/woman_with_hat/vp.pt +3 -0
  47. requirements.txt +13 -0
  48. wise +1 -0
.gitattributes ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.png filter=lfs diff=lfs merge=lfs -text
25
+ *.jpg filter=lfs diff=lfs merge=lfs -text
26
+ *.jpeg filter=lfs diff=lfs merge=lfs -text
27
+ *.rar filter=lfs diff=lfs merge=lfs -text
28
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
29
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
30
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
31
+ *.tflite filter=lfs diff=lfs merge=lfs -text
32
+ *.tgz filter=lfs diff=lfs merge=lfs -text
33
+ *.wasm filter=lfs diff=lfs merge=lfs -text
34
+ *.xz filter=lfs diff=lfs merge=lfs -text
35
+ *.zip filter=lfs diff=lfs merge=lfs -text
36
+ *.zst filter=lfs diff=lfs merge=lfs -text
37
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
.github/workflows/file_size_check.yml ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Check file size
2
+ on: # or directly `on: [push]` to run the action on every push on any branch
3
+ pull_request:
4
+ branches: [main]
5
+
6
+ # to run this workflow manually from the Actions tab
7
+ workflow_dispatch:
8
+
9
+ jobs:
10
+ sync-to-hub:
11
+ runs-on: ubuntu-latest
12
+ steps:
13
+ - name: Check large files
14
+ uses: ActionsDesk/lfs-warning@v2.0
15
+ with:
16
+ filesizelimit: 10485760 # this is 10MB so we can sync to HF Spaces
.github/workflows/push_to_huggingface.yml ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ name: Sync to Hugging Face hub
2
+ on:
3
+ push:
4
+ branches: [main]
5
+
6
+ # to run this workflow manually from the Actions tab
7
+ workflow_dispatch:
8
+
9
+ jobs:
10
+ sync-to-hub:
11
+ runs-on: ubuntu-latest
12
+ steps:
13
+ - uses: actions/checkout@v3
14
+ with:
15
+ fetch-depth: 0
16
+ - name: Push to hub
17
+ env:
18
+ HF_TOKEN: ${{ secrets.HF_TOKEN }}
19
+ run: git push https://MaxReimann:$HF_TOKEN@huggingface.co/spaces/MaxReimann/Whitebox-Style-Transfer-Editing main
.gitmodules ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ [submodule "wise"]
2
+ path = wise
3
+ url = https://github.com/winfried-ripken/wise
4
+ branch = minimal_pipeline
README.md ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: White-box Style Transfer Editing (WISE)
3
+ emoji: 🎨
4
+ colorFrom: pink
5
+ colorTo: red
6
+ sdk: streamlit
7
+ sdk_version: 1.10.0
8
+ app_file: Whitebox_style_transfer.py
9
+ tags:
10
+ - Style Transfer
11
+ - Image Synthesis
12
+ - Editing
13
+ - Painting
14
+ pinned: false
15
+ license: mit
16
+ duplicated_from: MaxReimann/Whitebox-Style-Transfer-Editing
17
+ ---
18
+ # White-box Style Transfer Editing (WISE) Demo
19
+
20
+ This app demonstrates the editing capabilities of the [White-box Style Transfer Editing (WISE) framework](https://github.com/winfried-ripken/wise).
21
+ It optimizes the parameters of classical image processing filters to match a given style image.
22
+ After optimization, parameters can be tuned by hand to achieve a desired look.
23
+
24
+
25
+ ### How does it work?
26
+ We provide a small stylization effect that contains several filters such as bump mapping or edge enhancement that can be optimized. The optimization yields so-called parameter masks, which contain per-pixel parameter settings for each filter.
27
+
28
+ ## 🚀 Try it out 🚀
29
+ **Our demo is now on huggingface: [huggingface/Whitebox-Style-Transfer-Editing](https://huggingface.co/spaces/MaxReimann/Whitebox-Style-Transfer-Editing)**
30
+
31
+ ![Streamlit Screenshot](images/screen_wise_demo.jpg?raw=true "WISE Editing Demo")
32
+
33
+ To run **locally**, clone the repo recursively and install the dependencies in requirements.txt. Set HUGGINGFACE to false in demo_config.py.
34
+ Then run the streamlit app using `streamlit run Whitebox_style_transfer.py`
35
+
36
+
37
+
38
+ ## Links & Paper
39
+ [Project page](https://ivpg.hpi3d.de/wise/),
40
+ [arxiv link](https://arxiv.org/abs/2207.14606),
41
+ [framework code](https://github.com/winfried-ripken/wise)
42
+
43
+ "WISE: Whitebox Image Stylization by Example-based Learning", by Winfried Lötzsch*, Max Reimann*, Martin Büßemeyer, Amir Semmo, Jürgen Döllner, Matthias Trapp, in ECCV 2022
44
+
45
+ ### Further notes
46
+ Pull Requests and further improvements welcome.
47
+ Please note that the shown effect is a minimal pipeline in terms of stylization capability, the much more feature-rich oilpaint and watercolor pipelines we show in our ECCV paper cannot be open-sourced due to IP reasons.
Whitebox_style_transfer.py ADDED
@@ -0,0 +1,312 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import base64
2
+ import datetime
3
+ import os
4
+ import sys
5
+ from io import BytesIO
6
+ from pathlib import Path
7
+ import numpy as np
8
+ import requests
9
+ import torch
10
+ import torch.nn.functional as F
11
+ from PIL import Image
12
+
13
+ PACKAGE_PARENT = 'wise'
14
+ SCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))
15
+ sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT)))
16
+
17
+ import streamlit as st
18
+ from streamlit.logger import get_logger
19
+ from st_click_detector import click_detector
20
+ import streamlit.components.v1 as components
21
+ from streamlit.source_util import get_pages
22
+ from streamlit_extras.switch_page_button import switch_page
23
+
24
+ from demo_config import HUGGING_FACE
25
+ from parameter_optimization.parametric_styletransfer import single_optimize
26
+ from parameter_optimization.parametric_styletransfer import CONFIG as ST_CONFIG
27
+ from parameter_optimization.strotss_org import strotss, pil_resize_long_edge_to
28
+ import helpers.session_state as session_state
29
+ from helpers import torch_to_np, np_to_torch
30
+ from effects import get_default_settings, MinimalPipelineEffect
31
+
32
+ st.set_page_config(layout="wide")
33
+ BASE_URL = "https://ivpg.hpi3d.de/wise/wise-demo/images/"
34
+ LOGGER = get_logger(__name__)
35
+
36
+ effect_type = "minimal_pipeline"
37
+
38
+ if "click_counter" not in st.session_state:
39
+ st.session_state.click_counter = 1
40
+
41
+ if "action" not in st.session_state:
42
+ st.session_state["action"] = ""
43
+
44
+ content_urls = [
45
+ {
46
+ "name": "Portrait", "id": "portrait",
47
+ "src": BASE_URL + "/content/portrait.jpeg"
48
+ },
49
+ {
50
+ "name": "Tuebingen", "id": "tubingen",
51
+ "src": BASE_URL + "/content/tubingen.jpeg"
52
+ },
53
+ {
54
+ "name": "Colibri", "id": "colibri",
55
+ "src": BASE_URL + "/content/colibri.jpeg"
56
+ }
57
+ ]
58
+
59
+ style_urls = [
60
+ {
61
+ "name": "Starry Night, Van Gogh", "id": "starry_night",
62
+ "src": BASE_URL + "/style/starry_night.jpg"
63
+ },
64
+ {
65
+ "name": "The Scream, Edward Munch", "id": "the_scream",
66
+ "src": BASE_URL + "/style/the_scream.jpg"
67
+ },
68
+ {
69
+ "name": "The Great Wave, Ukiyo-e", "id": "wave",
70
+ "src": BASE_URL + "/style/wave.jpg"
71
+ },
72
+ {
73
+ "name": "Woman with Hat, Henry Matisse", "id": "woman_with_hat",
74
+ "src": BASE_URL + "/style/woman_with_hat.jpg"
75
+ }
76
+ ]
77
+
78
+
79
+ def last_image_clicked(type="content", action=None, ):
80
+ kw = "last_image_clicked" + "_" + type
81
+ if action:
82
+ session_state.get(**{kw: action})
83
+ elif kw not in session_state.get():
84
+ return None
85
+ else:
86
+ return session_state.get()[kw]
87
+
88
+
89
+ @st.cache
90
+ def _retrieve_from_id(clicked, urls):
91
+ src = [x["src"] for x in urls if x["id"] == clicked][0]
92
+ img = Image.open(requests.get(src, stream=True).raw)
93
+ return img, src
94
+
95
+
96
+ def store_img_from_id(clicked, urls, imgtype):
97
+ img, src = _retrieve_from_id(clicked, urls)
98
+ session_state.get(**{f"{imgtype}_im": img, f"{imgtype}_render_src": src, f"{imgtype}_id": clicked})
99
+
100
+
101
+ def img_choice_panel(imgtype, urls, default_choice, expanded):
102
+ with st.expander(f"Select {imgtype} image:", expanded=expanded):
103
+ html_code = '<div class="column" style="display: flex; flex-wrap: wrap; padding: 0 4px;">'
104
+ for url in urls:
105
+ html_code += f"<a href='#' id='{url['id']}' style='padding: 0px 5px'><img height='160px' style='margin-top: 8px;' src='{url['src']}'></a>"
106
+ html_code += "</div>"
107
+ clicked = click_detector(html_code)
108
+
109
+ if not clicked and st.session_state["action"] not in ("uploaded", "switch_page_from_local_edits", "switch_page_from_presets", "slider_change", "reset"): # default val
110
+ store_img_from_id(default_choice, urls, imgtype)
111
+
112
+ st.write("OR: ")
113
+
114
+ with st.form(imgtype + "-form", clear_on_submit=True):
115
+ uploaded_im = st.file_uploader(f"Load {imgtype} image:", type=["png", "jpg"], )
116
+ upload_pressed = st.form_submit_button("Upload")
117
+
118
+ if upload_pressed and uploaded_im is not None:
119
+ img = Image.open(uploaded_im)
120
+ buffered = BytesIO()
121
+ img.save(buffered, format="JPEG")
122
+ encoded = base64.b64encode(buffered.getvalue()).decode()
123
+ # session_state.get(uploaded_im=img, content_render_src=f"data:image/jpeg;base64,{encoded}")
124
+ session_state.get(**{f"{imgtype}_im": img, f"{imgtype}_render_src": f"data:image/jpeg;base64,{encoded}",
125
+ f"{imgtype}_id": "uploaded"})
126
+ st.session_state["action"] = "uploaded"
127
+ st.write("uploaded.")
128
+
129
+ last_clicked = last_image_clicked(type=imgtype)
130
+ print("last_clicked", last_clicked, "clicked", clicked, "action", st.session_state["action"] )
131
+ if not upload_pressed and clicked != "": # trigger when no file uploaded
132
+ if last_clicked != clicked: # only activate when content was actually clicked
133
+ store_img_from_id(clicked, urls, imgtype)
134
+ last_image_clicked(type=imgtype, action=clicked)
135
+ st.session_state["action"] = "clicked"
136
+ st.session_state.click_counter += 1 # hack to get page to reload at top
137
+
138
+ state = session_state.get()
139
+ st.sidebar.write(f'Selected {imgtype} image:')
140
+ st.sidebar.markdown(f'<img src="{state[f"{imgtype}_render_src"]}" width=240px></img>', unsafe_allow_html=True)
141
+
142
+
143
+ def optimize(effect, preset, result_image_placeholder):
144
+ content = st.session_state["Content_im"]
145
+ style = st.session_state["Style_im"]
146
+ result_image_placeholder.text("<- Custom content/style needs to be style transferred")
147
+ optimize_button = st.sidebar.button("Optimize Style Transfer")
148
+ if optimize_button:
149
+ if HUGGING_FACE:
150
+ result_image_placeholder.warning("NST optimization is currently disabled in this HuggingFace Space because it takes ~5min to optimize. To try it out, please clone the repo and change the huggingface variable in demo_config.py")
151
+ st.stop()
152
+
153
+ result_image_placeholder.text("Executing NST to create reference image..")
154
+ base_dir = f"result/{datetime.datetime.now().strftime(r'%Y-%m-%d %H.%Mh %Ss')}"
155
+ os.makedirs(base_dir)
156
+ with st.spinner(text="Running NST"):
157
+ reference = strotss(pil_resize_long_edge_to(content, 1024),
158
+ pil_resize_long_edge_to(style, 1024), content_weight=16.0,
159
+ device=torch.device("cuda"), space="uniform")
160
+ progress_bar = result_image_placeholder.progress(0.0)
161
+ ref_save_path = os.path.join(base_dir, "reference.jpg")
162
+ content_save_path = os.path.join(base_dir, "content.jpg")
163
+ resize_to = 720
164
+ reference = pil_resize_long_edge_to(reference, resize_to)
165
+ reference.save(ref_save_path)
166
+ content.save(content_save_path)
167
+ ST_CONFIG["n_iterations"] = 300
168
+ with st.spinner(text="Optimizing parameters.."):
169
+ vp, content_img_cuda = single_optimize(effect, preset, "l1", content_save_path, str(ref_save_path),
170
+ write_video=False, base_dir=base_dir,
171
+ iter_callback=lambda i: progress_bar.progress(
172
+ float(i) / ST_CONFIG["n_iterations"]))
173
+ return content_img_cuda.detach(), vp.cuda().detach()
174
+ else:
175
+ if not "result_vp" in st.session_state:
176
+ st.stop()
177
+ else:
178
+ return st.session_state["effect_input"], st.session_state["result_vp"]
179
+
180
+
181
+ @st.cache(hash_funcs={MinimalPipelineEffect: id})
182
+ def create_effect():
183
+ effect, preset, param_set = get_default_settings(effect_type)
184
+ effect.enable_checkpoints()
185
+ effect.cuda()
186
+ return effect, preset
187
+
188
+
189
+ def load_visual_params(vp_path: str, img_org: Image, org_cuda: torch.Tensor, effect) -> torch.Tensor:
190
+ if Path(vp_path).exists():
191
+ vp = torch.load(vp_path).detach().clone()
192
+ vp = F.interpolate(vp, (img_org.height, img_org.width))
193
+ if len(effect.vpd.vp_ranges) == vp.shape[1]:
194
+ return vp
195
+ # use preset and save it
196
+ vp = effect.vpd.preset_tensor(preset, org_cuda, add_local_dims=True)
197
+ torch.save(vp, vp_path)
198
+ return vp
199
+
200
+
201
+ # @st.cache(hash_funcs={torch.Tensor: id})
202
+ @st.experimental_memo
203
+ def load_params(content_id, style_id):#, effect):
204
+ preoptim_param_path = os.path.join("precomputed", effect_type, content_id, style_id)
205
+ img_org = Image.open(os.path.join(preoptim_param_path, "input.png"))
206
+ content_cuda = np_to_torch(img_org).cuda()
207
+ vp_path = os.path.join(preoptim_param_path, "vp.pt")
208
+ vp = load_visual_params(vp_path, img_org, content_cuda, effect)
209
+ return content_cuda, vp
210
+
211
+
212
+ def render_effect(effect, content_cuda, vp):
213
+ with torch.no_grad():
214
+ result_cuda = effect(content_cuda, vp)
215
+ img_res = Image.fromarray((torch_to_np(result_cuda) * 255.0).astype(np.uint8))
216
+ return img_res
217
+
218
+
219
+ result_container = st.container()
220
+ coll1, coll2 = result_container.columns([3,2])
221
+ coll1.header("Result")
222
+ coll2.header("Global Edits")
223
+ result_image_placeholder = coll1.empty()
224
+ result_image_placeholder.markdown("## loading..")
225
+
226
+ img_choice_panel("Content", content_urls, "portrait", expanded=True)
227
+ img_choice_panel("Style", style_urls, "starry_night", expanded=True)
228
+
229
+ state = session_state.get()
230
+ content_id = state["Content_id"]
231
+ style_id = state["Style_id"]
232
+
233
+ effect, preset = create_effect()
234
+
235
+ print("content id, style id", content_id, style_id )
236
+ if st.session_state["action"] == "uploaded":
237
+ content_img, _vp = optimize(effect, preset, result_image_placeholder)
238
+ elif st.session_state["action"] in ("switch_page_from_local_edits", "switch_page_from_presets", "slider_change") or \
239
+ content_id == "uploaded" or style_id == "uploaded":
240
+ print("restore param")
241
+ _vp = st.session_state["result_vp"]
242
+ content_img = st.session_state["effect_input"]
243
+ else:
244
+ print("load_params")
245
+ content_img, _vp = load_params(content_id, style_id)#, effect)
246
+
247
+ vp = torch.clone(_vp)
248
+
249
+
250
+ def reset_params(means, names):
251
+ for i, name in enumerate(names):
252
+ st.session_state["slider_" + name] = means[i]
253
+
254
+ def on_slider():
255
+ st.session_state["action"] = "slider_change"
256
+
257
+
258
+ with coll2:
259
+ show_params_names = [ 'bumpScale', "bumpOpacity", "contourOpacity"]
260
+ display_means = []
261
+ def create_slider(name):
262
+ mean = torch.mean(vp[:, effect.vpd.name2idx[name]]).item()
263
+ display_mean = mean + 0.5
264
+ display_means.append(display_mean)
265
+ if "slider_" + name not in st.session_state or st.session_state["action"] != "slider_change":
266
+ st.session_state["slider_" + name] = display_mean
267
+ slider = st.slider(f"Mean {name}: ", 0.0, 1.0, step=0.05, key="slider_" + name, on_change=on_slider)
268
+ vp[:, effect.vpd.name2idx[name]] += slider - display_mean
269
+ vp.clamp_(-0.5, 0.5)
270
+
271
+ for name in show_params_names:
272
+ create_slider(name)
273
+
274
+ others_idx = set(range(len(effect.vpd.vp_ranges))) - set([effect.vpd.name2idx[name] for name in show_params_names])
275
+ others_names = [effect.vpd.vp_ranges[i][0] for i in sorted(list(others_idx))]
276
+ other_param = st.selectbox("Other parameters: ", others_names)
277
+ create_slider(other_param)
278
+
279
+
280
+ reset_button = st.button("Reset Parameters", on_click=reset_params, args=(display_means, show_params_names))
281
+ if reset_button:
282
+ st.session_state["action"] = "reset"
283
+ st.experimental_rerun()
284
+
285
+ edit_locally_btn = st.button("Edit Local Parameter Maps")
286
+ if edit_locally_btn:
287
+ switch_page('️ local edits')
288
+
289
+ apply_presets = st.button("Paint Presets")
290
+ if apply_presets:
291
+ switch_page("Apply_preset")
292
+
293
+ img_res = render_effect(effect, content_img, vp)
294
+
295
+ st.session_state["result_vp"] = vp
296
+ st.session_state["effect_input"] = content_img
297
+ st.session_state["last_result"] = img_res
298
+
299
+ with coll1:
300
+ # width = int(img_res.width * 500 / img_res.height)
301
+ result_image_placeholder.image(img_res)#, width=width)
302
+
303
+ # a bit hacky way to return focus to top of page after clicking on images
304
+ components.html(
305
+ f"""
306
+ <p>{st.session_state.click_counter}</p>
307
+ <script>
308
+ window.parent.document.querySelector('section.main').scrollTo(0, 0);
309
+ </script>
310
+ """,
311
+ height=0
312
+ )
demo_config.py ADDED
@@ -0,0 +1 @@
 
 
1
+ HUGGING_FACE=True # if run in hugging face. Disables some things like full NST optimization
images/content/colibri.jpeg ADDED

Git LFS Details

  • SHA256: 402a17fefdfc8c8b9a64e94d22e82bdbb7f2af6e34ac14e03e7543f19c693824
  • Pointer size: 131 Bytes
  • Size of remote file: 227 kB
images/content/portrait.jpeg ADDED

Git LFS Details

  • SHA256: be928f0ca5e1649b481731bf2a5ae8437ff5a5bed2da7112050e33df23a931a3
  • Pointer size: 132 Bytes
  • Size of remote file: 1.02 MB
images/content/tubingen.jpeg ADDED

Git LFS Details

  • SHA256: a527b42944afe39816e18bab1fcc3f10c82bed726d2c4c8ccc7987e312797269
  • Pointer size: 131 Bytes
  • Size of remote file: 311 kB
images/screen_wise_demo.jpg ADDED

Git LFS Details

  • SHA256: a4de198cb84e2d78113942b00c06003cc67a3564f0c4813961d01ccaa8e71df4
  • Pointer size: 131 Bytes
  • Size of remote file: 171 kB
images/style/candy.jpg ADDED

Git LFS Details

  • SHA256: 1af09b2c18a6674b7d88849cb87564dd77e1ce04d1517bb085449b614cc0c8d8
  • Pointer size: 131 Bytes
  • Size of remote file: 376 kB
images/style/feathers.jpg ADDED

Git LFS Details

  • SHA256: d4b730ee0360cd9186de22177666223848e160c876082ccf24ed882340461cb9
  • Pointer size: 131 Bytes
  • Size of remote file: 322 kB
images/style/mosaic.jpg ADDED

Git LFS Details

  • SHA256: 17698add60e1ce8618103c04e6cdddc5ac21b82181a37b7517413f36a5b785a4
  • Pointer size: 130 Bytes
  • Size of remote file: 77.4 kB
images/style/starry_night.jpg ADDED

Git LFS Details

  • SHA256: 0592ab0f7b51b3ce72608a95e53880bd93971e5b48f93a0ce8dfd3cbccf3d098
  • Pointer size: 131 Bytes
  • Size of remote file: 315 kB
images/style/the_scream.jpg ADDED

Git LFS Details

  • SHA256: f3075cddeb974a9ee1d6418f7f33fd05185a7ac7dcb4288bd2805d499a9dcaee
  • Pointer size: 131 Bytes
  • Size of remote file: 217 kB
images/style/udnie.jpg ADDED

Git LFS Details

  • SHA256: c3d3d4b9e325b485820d1a91df3bad591c2bab86919412e9f1b424e862cf204a
  • Pointer size: 131 Bytes
  • Size of remote file: 465 kB
images/style/wave.jpg ADDED

Git LFS Details

  • SHA256: 14bcf58a39b7095168f27756d76d0c615bb8954acc5eae9e62c66a9684c2ee2b
  • Pointer size: 131 Bytes
  • Size of remote file: 149 kB
images/style/woman_with_hat.jpg ADDED

Git LFS Details

  • SHA256: 3bc5f08382a949f737128daa17e3e1ebd80a948c88527168dbe04e98a3eb29db
  • Pointer size: 131 Bytes
  • Size of remote file: 127 kB
pages/1_🎨_Apply_preset.py ADDED
@@ -0,0 +1,121 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import sys
3
+ import torch.nn.functional as F
4
+ import torch
5
+
6
+ PACKAGE_PARENT = '..'
7
+ WISE_DIR = '../wise/'
8
+ SCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))
9
+ sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT)))
10
+ sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, WISE_DIR)))
11
+
12
+
13
+ import numpy as np
14
+ from PIL import Image
15
+ import streamlit as st
16
+ from streamlit_drawable_canvas import st_canvas
17
+
18
+ from effects.minimal_pipeline import MinimalPipelineEffect
19
+ from helpers.visual_parameter_def import minimal_pipeline_presets, minimal_pipeline_bump_mapping_preset, minimal_pipeline_xdog_preset
20
+ from helpers import torch_to_np, np_to_torch
21
+ from effects import get_default_settings
22
+ from demo_config import HUGGING_FACE
23
+
24
+ st.set_page_config(page_title="Preset Edit Demo", layout="wide")
25
+
26
+
27
+ # @st.cache(hash_funcs={OilPaintEffect: id})
28
+ @st.cache(hash_funcs={MinimalPipelineEffect: id})
29
+ def local_edits_create_effect():
30
+ effect, preset, param_set = get_default_settings("minimal_pipeline")
31
+ effect.enable_checkpoints()
32
+ effect.cuda()
33
+ return effect, param_set
34
+
35
+
36
+ effect, param_set = local_edits_create_effect()
37
+ presets = {
38
+ "original": minimal_pipeline_presets,
39
+ "bump mapped": minimal_pipeline_bump_mapping_preset,
40
+ "contoured": minimal_pipeline_xdog_preset
41
+ }
42
+
43
+ st.session_state["action"] = "switch_page_from_presets" # on switchback, remember effect input
44
+
45
+ active_preset = st.sidebar.selectbox("apply preset: ", ["original", "bump mapped", "contoured"])
46
+ blend_strength = st.sidebar.slider("Parameter blending strength (non-hue) : ", 0.0, 1.0, 1.0, 0.05)
47
+ hue_blend_strength = st.sidebar.slider("Hue-shift blending strength : ", 0.0, 1.0, 1.0, 0.05)
48
+
49
+ st.sidebar.text("Drawing options:")
50
+ stroke_width = st.sidebar.slider("Stroke width: ", 1, 80, 40)
51
+ drawing_mode = st.sidebar.selectbox(
52
+ "Drawing tool:", ("freedraw", "line", "rect", "circle", "transform")
53
+ )
54
+
55
+ st.session_state["preset_canvas_key"] ="preset_canvas"
56
+
57
+ vp = torch.clone(st.session_state["result_vp"])
58
+ org_cuda = st.session_state["effect_input"]
59
+
60
+ @st.experimental_memo
61
+ def greyscale_original(_org_cuda, content_id): #content_id is used for hashing
62
+ if HUGGING_FACE:
63
+ wsize = 450
64
+ img_org_height, img_org_width = _org_cuda.shape[-2:]
65
+ wpercent = (wsize / float(img_org_width))
66
+ hsize = int((float(img_org_height) * float(wpercent)))
67
+ else:
68
+ longest_edge = 670
69
+ img_org_height, img_org_width = _org_cuda.shape[-2:]
70
+ max_width_height = max(img_org_width, img_org_height)
71
+ hsize = int((float(longest_edge) * float(float(img_org_height) / max_width_height)))
72
+ wsize = int((float(longest_edge) * float(float(img_org_width) / max_width_height)))
73
+
74
+ org_img = F.interpolate(_org_cuda, (hsize, wsize), mode="bilinear")
75
+ org_img = torch.mean(org_img, dim=1, keepdim=True) / 2.0
76
+ org_img = torch_to_np(org_img, multiply_by_255=True)[..., np.newaxis].repeat(3, axis=2)
77
+ org_img = Image.fromarray(org_img.astype(np.uint8))
78
+ return org_img, hsize, wsize
79
+
80
+ greyscale_img, hsize, wsize = greyscale_original(org_cuda, st.session_state["Content_id"])
81
+
82
+ coll1, coll2 = st.columns(2)
83
+ coll1.header("Draw Mask")
84
+ coll2.header("Live Result")
85
+
86
+ with coll1:
87
+ # Create a canvas component
88
+ canvas_result = st_canvas(
89
+ fill_color="rgba(0, 0, 0, 1)", # Fixed fill color with some opacity
90
+ stroke_width=stroke_width,
91
+ background_image=greyscale_img,
92
+ width=greyscale_img.width,
93
+ height=greyscale_img.height,
94
+ drawing_mode=drawing_mode,
95
+ key=st.session_state["preset_canvas_key"]
96
+ )
97
+
98
+
99
+ res_data = None
100
+ if canvas_result.image_data is not None:
101
+ abc = np_to_torch(canvas_result.image_data.astype(np.float)).sum(dim=1, keepdim=True).cuda()
102
+
103
+ img_org_width = org_cuda.shape[-1]
104
+ img_org_height = org_cuda.shape[-2]
105
+ res_data = F.interpolate(abc, (img_org_height, img_org_width)).squeeze(1)
106
+
107
+ preset_tensor = effect.vpd.preset_tensor(presets[active_preset], org_cuda, add_local_dims=True)
108
+ hue = torch.clone(vp[:,effect.vpd.name2idx["hueShift"]])
109
+ vp[:] = preset_tensor * res_data * blend_strength + vp[:] * (1 - res_data * blend_strength)
110
+ vp[:, effect.vpd.name2idx["hueShift"]] = \
111
+ preset_tensor[:,effect.vpd.name2idx["hueShift"]] * res_data * hue_blend_strength + hue * (1 - res_data * hue_blend_strength)
112
+
113
+ with torch.no_grad():
114
+ result_cuda = effect(org_cuda, vp)
115
+
116
+ img_res = Image.fromarray((torch_to_np(result_cuda) * 255.0).astype(np.uint8))
117
+ coll2.image(img_res)
118
+
119
+ apply_btn = st.sidebar.button("Apply")
120
+ if apply_btn:
121
+ st.session_state["result_vp"] = vp
pages/2_🖌️_Local_edits.py ADDED
@@ -0,0 +1,242 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import sys
3
+
4
+ import torch.nn.functional as F
5
+ import torch
6
+ import numpy as np
7
+ import matplotlib
8
+ from matplotlib import pyplot as plt
9
+ import matplotlib.cm
10
+ from PIL import Image
11
+
12
+ import streamlit as st
13
+ from streamlit_drawable_canvas import st_canvas
14
+
15
+
16
+ PACKAGE_PARENT = '..'
17
+ WISE_DIR = '../wise/'
18
+ SCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))
19
+ sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT)))
20
+ sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, WISE_DIR)))
21
+
22
+
23
+
24
+ from effects.gauss2d_xy_separated import Gauss2DEffect
25
+ from effects.minimal_pipeline import MinimalPipelineEffect
26
+ from helpers import torch_to_np, np_to_torch
27
+ from effects import get_default_settings
28
+ from demo_config import HUGGING_FACE
29
+
30
+ st.set_page_config(page_title="Editing Demo", layout="wide")
31
+
32
+ # @st.cache(hash_funcs={OilPaintEffect: id})
33
+ @st.cache(hash_funcs={MinimalPipelineEffect: id})
34
+ def local_edits_create_effect():
35
+ effect, preset, param_set = get_default_settings("minimal_pipeline")
36
+ effect.enable_checkpoints()
37
+ effect.cuda()
38
+ return effect, param_set
39
+
40
+
41
+ effect, param_set = local_edits_create_effect()
42
+
43
+ @st.experimental_memo
44
+ def gen_param_strength_fig():
45
+ cmap = matplotlib.cm.get_cmap('plasma')
46
+ # cmap show
47
+ gradient = np.linspace(0, 1, 256)
48
+ gradient = np.vstack((gradient, gradient))
49
+ fig, ax = plt.subplots(figsize=(3, 0.1))
50
+ fig.patch.set_alpha(0.0)
51
+ ax.set_title("parameter strength", fontsize=6.5, loc="left")
52
+ ax.imshow(gradient, aspect='auto', cmap=cmap)
53
+ ax.set_axis_off()
54
+ return fig, cmap
55
+
56
cmap_fig, cmap = gen_param_strength_fig()

# Key under which the drawable-canvas widget stores its state.
st.session_state["canvas_key"] = "canvas"
try:
    # Parameter maps and effect input produced on the main page.
    vp = st.session_state["result_vp"]
    org_cuda = st.session_state["effect_input"]
except KeyError as e:
    # First run of this page before the main page populated the session.
    print("init run, certain keys not found. If this happens once its ok.")

# Reset the local-edit state machine when arriving from another page.
if st.session_state["action"] != "switch_page_from_local_edits":
    st.session_state.local_edit_action = "init"

st.session_state["action"] = "switch_page_from_local_edits" # on switchback, remember effect input

# Counter that is bumped to force the canvas to drop its stroke history;
# the "initial_drawing" dict is handed to st_canvas as its starting content.
if "mask_edit_counter" not in st.session_state:
    st.session_state["mask_edit_counter"] = 1
if "initial_drawing" not in st.session_state:
    st.session_state["initial_drawing"] = {"random": st.session_state["mask_edit_counter"], "background": "#eee"}
74
+
75
def on_slider_change():
    """Widget callback: note that a drawing option changed so the canvas keeps its strokes."""
    current_action = st.session_state.local_edit_action
    # During the initial page run there is nothing to update yet; abort the rerun.
    if current_action == "init":
        st.stop()
    st.session_state.local_edit_action = "slider"
79
+
80
def on_param_change():
    """Widget callback: record that the active parameter selection changed."""
    st.session_state.local_edit_action = "param_change"
82
+
83
# Parameter whose mask is edited; the extra "smooth" entry blurs all masks instead.
active_param = st.sidebar.selectbox("active parameter: ", param_set + ["smooth"], index=2, on_change=on_param_change)

st.sidebar.text("Drawing options")
if active_param != "smooth":
    # Signed strength added to the parameter map under each stroke.
    plus_or_minus = st.sidebar.slider("Increase or decrease param map: ", -1.0, 1.0, 0.8, 0.05,
                                      on_change=on_slider_change)
else:
    # Gaussian sigma used when smoothing the parameter maps.
    sigma = st.sidebar.slider("Sigma: ", 0.1, 10.0, 0.5, 0.1, on_change=on_slider_change)

stroke_width = st.sidebar.slider("Stroke width: ", 1, 50, 20, on_change=on_slider_change)
drawing_mode = st.sidebar.selectbox(
    "Drawing tool:", ("freedraw", "line", "rect", "circle", "transform"), on_change=on_slider_change,
)

st.sidebar.text("Viewing options")
if active_param != "smooth":
    # Blend factor of the colormapped parameter overlay over the greyscale input.
    overlay = st.sidebar.slider("show parameter overlay: ", 0.0, 1.0, 0.8, 0.02, on_change=on_slider_change)
    st.sidebar.pyplot(cmap_fig, bbox_inches='tight', pad_inches=0)

st.sidebar.text("Update:")
realtime_update = st.sidebar.checkbox("Update in realtime", True)
clear_after_draw = st.sidebar.checkbox("Clear Canvas after each Stroke", False)
invert_selection = st.sidebar.checkbox("Invert Selection", False)
106
+
107
+
108
@st.experimental_memo
def greyscale_org(_org_cuda, content_id):  # content_id is used for hashing
    """Downscale the stylization input and turn it into a dimmed greyscale RGB numpy image.

    Returns:
        (org_img, hsize, wsize): HxWx3 float array plus the chosen display size.
    """
    src_h, src_w = _org_cuda.shape[-2:]
    if HUGGING_FACE:
        # Fixed display width on the hosted demo; height keeps the aspect ratio.
        wsize = 450
        hsize = int(float(src_h) * (wsize / float(src_w)))
    else:
        # Locally, fit the longest image edge to 670 px.
        longest_edge = 670
        longest_src = max(src_w, src_h)
        hsize = int(float(longest_edge) * (float(src_h) / longest_src))
        wsize = int(float(longest_edge) * (float(src_w) / longest_src))

    resized = F.interpolate(_org_cuda, (hsize, wsize), mode="bilinear")
    # Average the channels and halve intensity so drawn overlays stay readable.
    grey = torch.mean(resized, dim=1, keepdim=True) / 2.0
    org_img = torch_to_np(grey)[..., np.newaxis].repeat(3, axis=2)
    return org_img, hsize, wsize
126
+
127
def generate_param_mask(vp):
    """Compose the canvas background: the greyscale input, optionally blended
    with the colormapped map of the active parameter."""
    background, hsize, wsize = greyscale_org(org_cuda, st.session_state["Content_id"])
    if active_param != "smooth":
        param_idx = effect.vpd.name2idx[active_param]
        param_map = F.interpolate(vp, (hsize, wsize))[:, param_idx]
        # Shift from [-0.5, 0.5] to [0, 1] before colormapping; drop the alpha channel.
        colored = cmap((param_map + 0.5).cpu().numpy())[..., :3][0]
        background = background * (1 - overlay) + colored * overlay
    return Image.fromarray((background * 255).astype(np.uint8))
134
+
135
def compute_results(_vp):
    # Applies the strokes drawn on the canvas to the active parameter map
    # (or smooths all maps), then re-runs the effect.
    # Returns the rendered PIL image and the parameter tensor; note that the
    # "smooth" branch rebinds _vp, so callers must use the returned tensor.
    if "cached_canvas" in st.session_state and st.session_state["cached_canvas"].image_data is not None:
        canvas_result = st.session_state["cached_canvas"]
        # Collapse the canvas pixels (presumably RGBA — TODO confirm) into a
        # single-channel stroke mask on the GPU.
        abc = np_to_torch(canvas_result.image_data.astype(np.float32)).sum(dim=1, keepdim=True).cuda()

        if invert_selection:
            abc = abc * (- 1.0) + 1.0

        img_org_width = org_cuda.shape[-1]
        img_org_height = org_cuda.shape[-2]
        # Scale the stroke mask from canvas size up to full parameter-map resolution.
        res_data = F.interpolate(abc, (img_org_height, img_org_width)).squeeze(1)

        if active_param != "smooth":
            # In-place update: the session-state tensor itself is modified.
            _vp[:, effect.vpd.name2idx[active_param]] += plus_or_minus * res_data
            _vp.clamp_(-0.5, 0.5)
        else:
            # Separable Gaussian blur over all parameter maps.
            gauss2dx = Gauss2DEffect(dxdy=[1.0, 0.0], dim_kernsize=5)
            gauss2dy = Gauss2DEffect(dxdy=[0.0, 1.0], dim_kernsize=5)

            vp_smoothed = gauss2dx(_vp, torch.tensor(sigma).cuda())
            vp_smoothed = gauss2dy(vp_smoothed, torch.tensor(sigma).cuda())

            print(res_data.shape)
            print(_vp.shape)
            print(vp_smoothed.shape)
            # Blend towards the smoothed maps only where strokes were drawn.
            _vp = torch.lerp(_vp, vp_smoothed, res_data.unsqueeze(1))

    # Render the stylization with the (possibly updated) parameter maps.
    with torch.no_grad():
        result_cuda = effect(org_cuda, _vp)

    # Resize the result to the same display size as the canvas background.
    _, hsize, wsize = greyscale_org(org_cuda, st.session_state["Content_id"])
    result_cuda = F.interpolate(result_cuda, (hsize, wsize), mode="bilinear")

    return Image.fromarray((torch_to_np(result_cuda) * 255.0).astype(np.uint8)), _vp
169
+
170
# Two-column layout: drawing canvas on the left, rendered result on the right.
coll1, coll2 = st.columns(2)
coll1.header("Draw Mask:")
coll2.header("Live Result")
173
+
174
+ # there is no way of removing the canvas history/state without rerunning the whole program.
175
+ # therefore, giving the canvas a initial_drawing that differs from the canvas state will clear the background
176
def mark_canvas_for_redraw():
    """Force the canvas to clear by handing it a changed initial drawing."""
    print("mark for redraw")
    st.session_state["mask_edit_counter"] += 1 # change state of initial drawing
    st.session_state["initial_drawing"] = {
        "random": st.session_state["mask_edit_counter"],
        "background": "#eee",
    }
181
+
182
+
183
with coll1:
    print("edit action", st.session_state.local_edit_action)
    # With "clear after stroke" on, alternate between draw and redraw states so
    # each applied stroke is wiped from the canvas on the following rerun.
    if clear_after_draw and st.session_state.local_edit_action not in ("slider", "param_change", "init"):
        if st.session_state.local_edit_action == "redraw":
            st.session_state.local_edit_action = "draw"
            mark_canvas_for_redraw()
        else:
            st.session_state.local_edit_action = "redraw"

    mask = generate_param_mask(st.session_state["result_vp"])
    st.session_state["last_mask"] = mask

    # Create a canvas component
    canvas_result = st_canvas(
        fill_color="rgba(0, 0, 0, 1)",
        stroke_width=stroke_width,
        background_image=mask,
        update_streamlit=realtime_update,
        width=mask.width,
        height=mask.height,
        initial_drawing=st.session_state["initial_drawing"],
        drawing_mode=drawing_mode,
        key=st.session_state.canvas_key,
    )

    # The canvas returns no data on its very first render; wait for the rerun.
    if canvas_result.json_data is None:
        print("stops")
        st.stop()

    st.session_state["cached_canvas"] = canvas_result

    print("compute result")
    # Apply the strokes and re-render; persist both for the other pages.
    img_res, vp = compute_results(vp)
    st.session_state["last_result"] = img_res
    st.session_state["result_vp"] = vp

    st.markdown("### Mask: " + active_param)

    if st.session_state.local_edit_action in ("slider", "param_change", "init"):
        print("set redraw")
        st.session_state.local_edit_action = "redraw"


print("plot masks")
# Small per-parameter previews of the current masks, shifted to [0, 1] for display.
texts = []
preview_masks = []
img = st.session_state["last_mask"]
for i, p in enumerate(param_set):
    idx = effect.vpd.name2idx[p]
    iii = F.interpolate(vp[:, idx:idx + 1] + 0.5, (int(img.height * 0.2), int(img.width * 0.2)))
    texts.append(p[:15])
    preview_masks.append(torch_to_np(iii))

coll2.image(img_res) # , use_column_width="auto")
ppp = st.columns(len(param_set))
for i, (txt, im) in enumerate(zip(texts, preview_masks)):
    ppp[i].text(txt)
    ppp[i].image(im, clamp=True)

print("....")
pages/3_📖_Readme.py ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+
3
+ st.title("White-box Style Transfer Editing")
4
+
5
+ st.markdown("""
6
+ This app demonstrates the editing capabilities of the White-box Style Transfer Editing (WISE) framework.
7
+ It optimizes the parameters of classical image processing filters to match a given style image.
8
+ After optimization, parameters can be tuned by hand to achieve a desired look.
9
+
10
+ ### How does it work?
11
+ We provide a small stylization effect that contains several filters such as bump mapping or edge enhancement that can be optimized. The optimization yields so-called parameter masks, which contain per pixel parameter settings of each filter.
12
+
13
+ ### How to use the app?
14
+ - On the first page select existing content/style combinations or upload images to optimize.
15
+ - After the effect has been applied, use the parameter sliders to adjust a parameter value globally
16
+ - On the "apply preset" page, we defined several parameter presets that can be drawn on the image. Press "Apply" to make the changes permanent
17
+ - On the "local editing" page, individual parameter masks can be edited regionally. Choose the parameter on the left sidebar, and use the parameter strength slider to either increase or decrease the strength of the drawn strokes
18
+ - Strokes on the drawing canvas (left column) are updated in real-time on the result in the right column.
19
+ - Strokes stay on the canvas unless manually deleted by clicking the trash button. To remove them from the canvas after each stroke, tick the corresponding checkbox in the sidebar.
20
+
21
+ ### Links & Paper
22
+ [Project page](https://ivpg.hpi3d.de/wise/),
23
+ [arxiv link](https://arxiv.org/abs/2207.14606),
24
+ [demo code](https://github.com/MaxReimann/WISE-Editing)
25
+
26
+ "WISE: Whitebox Image Stylization by Example-based Learning", by Winfried Lötzsch*, Max Reimann*, Martin Büßemeyer, Amir Semmo, Jürgen Döllner, Matthias Trapp, in ECCV 2022
27
+
28
+ ### Further notes
29
+ Pull Requests and further improvements are very welcome.
30
+ On huggingface, optimization is currently disabled because it takes ~5min to optimize. If you want to try out your own images, please check out the app on [github](https://github.com/MaxReimann/WISE-Editing).
31
+ Please note that the shown effect is a minimal pipeline in terms of stylization capability, the much more feature-rich oilpaint and watercolor pipelines we show in our ECCV paper cannot be open-sourced due to IP reasons.
32
+ """)
precomputed/minimal_pipeline/colibri/starry_night/input.png ADDED

Git LFS Details

  • SHA256: fe3acfac990510bd5f1cb0bc61997de181815318798c001ad8f1b2534255ad43
  • Pointer size: 131 Bytes
  • Size of remote file: 436 kB
precomputed/minimal_pipeline/colibri/starry_night/vp.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8f40c444b91434d3372267e267b7668a93966d79b29592f3cc8de1dd2bc756c4
3
+ size 12753387
precomputed/minimal_pipeline/colibri/the_scream/input.png ADDED

Git LFS Details

  • SHA256: fe3acfac990510bd5f1cb0bc61997de181815318798c001ad8f1b2534255ad43
  • Pointer size: 131 Bytes
  • Size of remote file: 436 kB
precomputed/minimal_pipeline/colibri/the_scream/vp.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3ec0b6a86555481aca68c4a20f12980d827fe69754bc8ffba6a2b6cf09c95211
3
+ size 12753387
precomputed/minimal_pipeline/colibri/wave/input.png ADDED

Git LFS Details

  • SHA256: fe3acfac990510bd5f1cb0bc61997de181815318798c001ad8f1b2534255ad43
  • Pointer size: 131 Bytes
  • Size of remote file: 436 kB
precomputed/minimal_pipeline/colibri/wave/vp.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:013a9a3654664bdb201ab7a48e98711014c889fee5b05e1237fafa08a75dec67
3
+ size 12753387
precomputed/minimal_pipeline/colibri/woman_with_hat/input.png ADDED

Git LFS Details

  • SHA256: fe3acfac990510bd5f1cb0bc61997de181815318798c001ad8f1b2534255ad43
  • Pointer size: 131 Bytes
  • Size of remote file: 436 kB
precomputed/minimal_pipeline/colibri/woman_with_hat/vp.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:3995afcba2a055d190b08e18661e6ca809cb9755d426696130c793793ec2c548
3
+ size 12753387
precomputed/minimal_pipeline/portrait/starry_night/input.png ADDED

Git LFS Details

  • SHA256: 43bc96103a540ba85ab398bdd88d229b7bf0c6599f3b454c4acc952e72232c40
  • Pointer size: 131 Bytes
  • Size of remote file: 545 kB
precomputed/minimal_pipeline/portrait/starry_night/vp.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:102c79861638e382fb79fbe203e7b643c500b93d400b63c06bebdad22150a490
3
+ size 14904747
precomputed/minimal_pipeline/portrait/the_scream/input.png ADDED

Git LFS Details

  • SHA256: 43bc96103a540ba85ab398bdd88d229b7bf0c6599f3b454c4acc952e72232c40
  • Pointer size: 131 Bytes
  • Size of remote file: 545 kB
precomputed/minimal_pipeline/portrait/the_scream/vp.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:24e4dde0fbaae2eded6c760b1ad15b56c1babe4ba70fb2dd2bc49beca95eee15
3
+ size 14904747
precomputed/minimal_pipeline/portrait/wave/input.png ADDED

Git LFS Details

  • SHA256: 43bc96103a540ba85ab398bdd88d229b7bf0c6599f3b454c4acc952e72232c40
  • Pointer size: 131 Bytes
  • Size of remote file: 545 kB
precomputed/minimal_pipeline/portrait/wave/vp.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:51a9fe9033f2b77fc21b5f1c546e3a9af1079bc60c0f2d4b61a0ad7b35b7034f
3
+ size 14904747
precomputed/minimal_pipeline/portrait/woman_with_hat/input.png ADDED

Git LFS Details

  • SHA256: 43bc96103a540ba85ab398bdd88d229b7bf0c6599f3b454c4acc952e72232c40
  • Pointer size: 131 Bytes
  • Size of remote file: 545 kB
precomputed/minimal_pipeline/portrait/woman_with_hat/vp.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4384dc543accfec73dec64f5c02cc8f11ed7ac281edcbaea6db2bddc35718acb
3
+ size 14904747
precomputed/minimal_pipeline/tubingen/starry_night/input.png ADDED

Git LFS Details

  • SHA256: c3d16ea5a28795798bc1cf927a17c5d9d38a56c519b0f90c98c05f7f742f754c
  • Pointer size: 131 Bytes
  • Size of remote file: 589 kB
precomputed/minimal_pipeline/tubingen/starry_night/vp.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:75a63bfc9e71641e1f7a7b99127aaa8f16c1ed40636a210b523fb5c82f73abd6
3
+ size 12416427
precomputed/minimal_pipeline/tubingen/the_scream/input.png ADDED

Git LFS Details

  • SHA256: c3d16ea5a28795798bc1cf927a17c5d9d38a56c519b0f90c98c05f7f742f754c
  • Pointer size: 131 Bytes
  • Size of remote file: 589 kB
precomputed/minimal_pipeline/tubingen/the_scream/vp.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5f0118f3aae6559ae743255026cc6d81019bd8b907e2fa59a93f3c2a54176eb4
3
+ size 12416427
precomputed/minimal_pipeline/tubingen/wave/input.png ADDED

Git LFS Details

  • SHA256: c3d16ea5a28795798bc1cf927a17c5d9d38a56c519b0f90c98c05f7f742f754c
  • Pointer size: 131 Bytes
  • Size of remote file: 589 kB
precomputed/minimal_pipeline/tubingen/wave/vp.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c6412df16c03f5d48d9c2fe5e91449c99cc2b6fc240fd7c385edc5ab941526d4
3
+ size 12416427
precomputed/minimal_pipeline/tubingen/woman_with_hat/input.png ADDED

Git LFS Details

  • SHA256: c3d16ea5a28795798bc1cf927a17c5d9d38a56c519b0f90c98c05f7f742f754c
  • Pointer size: 131 Bytes
  • Size of remote file: 589 kB
precomputed/minimal_pipeline/tubingen/woman_with_hat/vp.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:52a9e9c1f716bb3dbc6f5adbf7274356f3f06545f404244a930ecf7155389f99
3
+ size 12416427
requirements.txt ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ imageio
2
+ imageio-ffmpeg
3
+ matplotlib
4
+ Pillow
5
+ numpy
6
+ --extra-index-url https://download.pytorch.org/whl/cu113
7
+ torch
8
+ torchvision
9
+ streamlit==1.10.0
10
+ streamlit_drawable_canvas==0.8.0
11
+ streamlit_extras==0.1.5
12
+ st_click_detector
13
+ scipy
wise ADDED
@@ -0,0 +1 @@
 
 
1
+ Subproject commit e4d085e22a081738667b32167f7901fa5febe25a