SahilCarterr committed
Commit cb0e352 · verified · 1 parent: 47eb494

Upload 32 files

.gitattributes CHANGED
@@ -33,3 +33,12 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ docs/static/images/application.jpg filter=lfs diff=lfs merge=lfs -text
+ docs/static/images/cross_custom.jpg filter=lfs diff=lfs merge=lfs -text
+ docs/static/images/framework.jpg filter=lfs diff=lfs merge=lfs -text
+ docs/static/images/gradio_preview.png filter=lfs diff=lfs merge=lfs -text
+ docs/static/images/icon.jpg filter=lfs diff=lfs merge=lfs -text
+ docs/static/images/multilingual_samples.png filter=lfs diff=lfs merge=lfs -text
+ docs/static/images/non-text.jpg filter=lfs diff=lfs merge=lfs -text
+ docs/static/images/self_custom.jpg filter=lfs diff=lfs merge=lfs -text
+ docs/static/images/teaser.jpg filter=lfs diff=lfs merge=lfs -text
docs/.DS_Store ADDED
Binary file (6.15 kB).
 
docs/index.html ADDED
@@ -0,0 +1,247 @@
+ <!DOCTYPE html>
+ <html>
+ <head>
+ <meta charset="utf-8">
+ <meta name="viewport" content="width=device-width, initial-scale=1">
+ <title>Calligrapher: Freestyle Text Image Customization</title>
+ <link rel="icon" href="./static/images/icon.jpg">
+
+ <link href="https://fonts.googleapis.com/css?family=Google+Sans|Noto+Sans|Castoro" rel="stylesheet">
+
+ <link rel="stylesheet" href="./static/css/bulma.min.css">
+ <link rel="stylesheet" href="./static/css/bulma-carousel.min.css">
+ <link rel="stylesheet" href="./static/css/bulma-slider.min.css">
+ <link rel="stylesheet" href="./static/css/fontawesome.all.min.css">
+ <link rel="stylesheet" href="https://cdn.jsdelivr.net/gh/jpswalsh/academicons@1/css/academicons.min.css">
+ <link rel="stylesheet" href="./static/css/index.css">
+
+ <script src="https://ajax.googleapis.com/ajax/libs/jquery/3.5.1/jquery.min.js"></script>
+ <script defer src="./static/js/fontawesome.all.min.js"></script>
+ <script src="./static/js/bulma-carousel.min.js"></script>
+ <script src="./static/js/bulma-slider.min.js"></script>
+ <script src="./static/js/index.js"></script>
+ </head>
+ <body>
+
+
+ <section class="hero">
+ <div class="hero-body">
+ <div class="container is-max-desktop">
+ <div class="columns is-centered">
+ <div class="column has-text-centered">
+ <h1 class="title is-1 publication-title">Calligrapher: </h1>
+ <h1 class="title is-1 publication-title">Freestyle Text Image Customization</h1>
+
+ <div class="column has-text-centered">
+ <div class="publication-links">
+ <!-- Video Link. -->
+ <span class="link-block">
+ <a href="https://youtu.be/FLSPphkylQE"
+ class="external-link button is-normal is-rounded is-dark">
+ <span class="icon">
+ <i class="fab fa-youtube"></i>
+ </span>
+ <span>Video</span>
+ </a>
+ </span>
+ <!-- Code Link. -->
+ <span class="link-block">
+ <a href="https://github.com/Calligrapher2025/Calligrapher"
+ class="external-link button is-normal is-rounded is-dark">
+ <span class="icon">
+ <i class="fab fa-github"></i>
+ </span>
+ <span>Code</span>
+ </a>
+ </span>
+ <!-- Dataset Link. -->
+ <span class="link-block">
+ <a href="https://huggingface.co/Calligrapher2025/Calligrapher"
+ class="external-link button is-normal is-rounded is-dark">
+ <span class="icon">
+ <i class="far fa-images"></i>
+ </span>
+ <span>Model & Data</span>
+ </a>
+ </span>
+
+ </div>
+ </div>
+ </div>
+ </div>
+ </div>
+ </div>
+
+
+
+ </section>
+
+ <section class="hero teaser">
+ <div class="container is-max-desktop">
+ <div class="hero-body">
+ <img src="./static/images/teaser.jpg" alt="Teaser Image" style="width: 100%;" />
+ <h2 class="subtitle has-text-centered teaser-subtitle">
+ Photorealistic text image customization results produced by our proposed
+ <span><strong>Calligrapher</strong>,</span> which allows users to perform customization with
+ diverse stylized images and text prompts.
+ </h2>
+ </div>
+ </div>
+ </section>
+
+
+
+ <section class="section">
+ <div class="container is-max-desktop">
+ <!-- Abstract. -->
+ <div class="columns is-centered has-text-centered">
+ <div class="column is-four-fifths">
+ <h2 class="title is-3">Abstract</h2>
+ <div class="content has-text-justified">
+
+ <p>
+ We introduce Calligrapher, a novel diffusion-based framework that integrates advanced text customization with artistic typography for digital calligraphy and design applications. To address the challenges of precise style control and data dependency in typographic customization, our framework incorporates three key technical contributions. First, we develop a self-distillation mechanism that leverages the pre-trained text-to-image generative model itself, alongside a large language model, to automatically construct a style-centric typography benchmark. Second, we introduce a localized style injection framework built on a trainable style encoder, comprising both Qformer and linear layers, that extracts robust style features from reference images. Third, an in-context generation mechanism directly embeds reference images into the denoising process, further improving alignment with the target styles. Extensive quantitative and qualitative evaluations across diverse fonts and design contexts confirm Calligrapher's accurate reproduction of intricate stylistic details and precise glyph positioning. By automating high-quality, visually consistent typography, Calligrapher surpasses traditional models, empowering creative practitioners in digital art, branding, and contextual typographic design.
+ </p>
+
+ </div>
+ </div>
+ </div>
+ <!--/ Abstract. -->
+
+ <!-- Paper video. -->
+ <div class="columns is-centered has-text-centered">
+ <div class="column is-four-fifths">
+ <h2 class="title is-3" style="margin-bottom: 1.5rem;">Demo Video</h2>
+ <div class="publication-video">
+ <iframe src="https://www.youtube.com/embed/FLSPphkylQE"
+ frameborder="0" allow="autoplay; encrypted-media" allowfullscreen></iframe>
+ </div>
+ </div>
+ </div>
+ <!--/ Paper video. -->
+ </div>
+ </section>
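The style encoder described in the abstract (a visual encoder followed by a Qformer and linear layers) is only specified in prose on this page. The PyTorch sketch below is a minimal illustration of such a module, assuming CLIP-like patch features of width 1024 and a small set of learned query tokens; all names, dimensions, and layer choices are assumptions for illustration, not the released implementation.

```python
# Hypothetical sketch of a Qformer-style style encoder: learned query tokens
# cross-attend to visual features from a reference style image, then a linear
# layer projects the result into the denoiser's token space.
# All module names and dimensions are illustrative assumptions.
import torch
import torch.nn as nn

class StyleEncoder(nn.Module):
    def __init__(self, vis_dim=1024, hidden_dim=768, out_dim=4096,
                 num_queries=16, num_layers=4, num_heads=8):
        super().__init__()
        # Learned query tokens that "read" the reference image features.
        self.queries = nn.Parameter(torch.randn(num_queries, hidden_dim) * 0.02)
        self.vis_proj = nn.Linear(vis_dim, hidden_dim)
        # A small stack of cross-attention + feed-forward blocks (Qformer-like).
        self.blocks = nn.ModuleList([
            nn.TransformerDecoderLayer(d_model=hidden_dim, nhead=num_heads,
                                       batch_first=True, norm_first=True)
            for _ in range(num_layers)
        ])
        # Final linear projection into the denoiser's embedding space.
        self.out_proj = nn.Linear(hidden_dim, out_dim)

    def forward(self, vis_feats):
        # vis_feats: (batch, num_patches, vis_dim) from a frozen visual encoder.
        memory = self.vis_proj(vis_feats)
        q = self.queries.unsqueeze(0).expand(vis_feats.size(0), -1, -1)
        for block in self.blocks:
            q = block(q, memory)   # queries attend to the image features
        return self.out_proj(q)    # (batch, num_queries, out_dim)

# Smoke test with random features standing in for a CLIP-like encoder output.
style_tokens = StyleEncoder()(torch.randn(2, 257, 1024))
print(style_tokens.shape)  # torch.Size([2, 16, 4096])
```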
+
+ <section class="hero framework">
+ <div class="container is-max-desktop">
+ <div class="hero-body">
+ <h1 class="title is-2 has-text-centered" style="margin-bottom: 2rem;">
+ Framework
+ </h1>
+ <div style="overflow: hidden; border-radius: 10px;">
+ <img src="./static/images/framework.jpg"
+ alt="Framework Diagram"
+ style="width: 100%; height: auto; object-fit: cover;" />
+ </div>
+
+
+ <div class="content has-text-justified">
+
+ <p>
+ Training framework of <strong>Calligrapher</strong>, demonstrating the integration of localized style injection and diffusion-based learning. The framework
+ processes masked images through a Variational Auto-Encoder (VAE) to obtain latent representations, which are concatenated with the mask and noise latents. A style
+ encoder comprising a visual encoder, Qformer, and linear layers extracts style-related features from the reference style image, while text
+ embeddings (e.g., "gic" in this case) modulate the denoising transformer. In the denoising block, style attention computed from the style features replaces the
+ original cross-attention, injecting the style embeddings through the denoiser's query to enable granular typographic control in the latent space. The
+ model is optimized under the flow-matching learning objective on the self-distillation typography dataset.
+ </p>
+
+ </div>
+ </div>
+ </div>
+ </section>
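The training recipe above combines latent concatenation with a flow-matching objective. The sketch below is a minimal, hypothetical version of one such training step under a rectified-flow parameterization: noisy latents, masked-image latents, and the downsampled mask are concatenated along the channel axis, and the denoiser regresses the velocity (the difference between clean latents and noise). Text and style conditioning are omitted for brevity, and TinyDenoiser plus all tensor shapes are illustrative assumptions rather than the actual model.

```python
# Hypothetical, simplified flow-matching step for the setup described above:
# masked-image latents, the resized mask, and noisy latents are concatenated
# along channels, and the denoiser regresses the flow velocity (z0 - noise).
import torch
import torch.nn as nn
import torch.nn.functional as F

class TinyDenoiser(nn.Module):
    """Stand-in for the denoising transformer (illustration only)."""
    def __init__(self, in_ch=9, out_ch=4):
        super().__init__()
        self.net = nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=1)

    def forward(self, x):
        return self.net(x)

def flow_matching_loss(denoiser, z0, z_masked, mask):
    # Rectified-flow interpolation between pure noise (t=0) and data (t=1).
    t = torch.rand(z0.size(0), 1, 1, 1)
    noise = torch.randn_like(z0)
    z_t = (1 - t) * noise + t * z0
    target_velocity = z0 - noise                  # regression target
    # Concatenate noisy, masked-image, and mask latents along channels.
    model_in = torch.cat([z_t, z_masked, mask], dim=1)
    return F.mse_loss(denoiser(model_in), target_velocity)

# Smoke test with random tensors standing in for VAE latents and the mask.
z0 = torch.randn(2, 4, 32, 32)        # clean latents
z_masked = torch.randn(2, 4, 32, 32)  # latents of the masked image
mask = torch.rand(2, 1, 32, 32)       # mask resized to latent resolution
print(flow_matching_loss(TinyDenoiser(), z0, z_masked, mask).item())
```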
+
+
+ <section class="section">
+ <div class="container is-max-desktop">
+
+ <div class="columns is-centered">
+ <div class="column is-full-width">
+ <h2 class="title is-3">Application</h2>
+ <div class="content has-text-justified">
+ <p>
+ Qualitative results of Calligrapher under various settings. We show text customization results under the (a) self-reference, (b) cross-reference, and (c) non-text reference settings. Reference-based image generation results are also included in (d).
+ </p>
+ </div>
+ <div style="overflow: hidden; border-radius: 10px;" class="content has-text-centered">
+ <img src="./static/images/application.jpg"
+ alt="Application Results"
+ style="width: 100%; height: auto; object-fit: cover;" />
+ </div>
+
+ <h2 class="title is-3">Multilingual Samples</h2>
+ <div style="overflow: hidden; border-radius: 10px;" class="content has-text-centered">
+ <img src="./static/images/multilingual_samples.png"
+ alt="Multilingual Results"
+ style="width: 100%; height: auto; object-fit: cover;" />
+ <p class="image-caption">
+ Multilingual freestyle text customization results. Tested languages and text: Chinese (你好朋友/夏天来了), Korean (서예가), and Japanese (ナルト).
+ </p>
+ </div>
+
+ <h2 class="title is-3">Gallery</h2>
+
+ <div style="overflow: hidden; border-radius: 10px;" class="content has-text-centered">
+ <img src="./static/images/self_custom.jpg"
+ alt="Self-reference Results"
+ style="width: 100%; height: auto; object-fit: cover;" />
+ <p class="image-caption">
+ Self-reference text image customization results.
+ </p>
+ </div>
+
+ <div style="overflow: hidden; border-radius: 10px;" class="content has-text-centered">
+ <img src="./static/images/cross_custom.jpg"
+ alt="Cross-reference Results"
+ style="width: 100%; height: auto; object-fit: cover;" />
+ <p class="image-caption">
+ Cross-reference text image customization results.
+ </p>
+ </div>
+
+ <div style="overflow: hidden; border-radius: 10px;" class="content has-text-centered">
+ <img src="./static/images/non-text.jpg"
+ alt="Non-text reference Results"
+ style="width: 100%; height: auto; object-fit: cover;" />
+ <p class="image-caption">
+ Non-text reference text image customization results.
+ </p>
+ </div>
+
+
+ </div>
+ </div>
+
+ </div>
+ </section>
+
+
+ <footer class="footer">
+ <div class="container">
+ <div class="content has-text-centered">
+ <a class="icon-link" href="https://github.com/Calligrapher2025/Calligrapher" disabled="">
+ <svg class="svg-inline--fa fa-github fa-w-16" aria-hidden="true" focusable="false" data-prefix="fab" data-icon="github" role="img" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 496 512" data-fa-i2svg=""><path fill="currentColor" d="M165.9 397.4c0 2-2.3 3.6-5.2 3.6-3.3.3-5.6-1.3-5.6-3.6 0-2 2.3-3.6 5.2-3.6 3-.3 5.6 1.3 5.6 3.6zm-31.1-4.5c-.7 2 1.3 4.3 4.3 4.9 2.6 1 5.6 0 6.2-2s-1.3-4.3-4.3-5.2c-2.6-.7-5.5.3-6.2 2.3zm44.2-1.7c-2.9.7-4.9 2.6-4.6 4.9.3 2 2.9 3.3 5.9 2.6 2.9-.7 4.9-2.6 4.6-4.6-.3-1.9-3-3.2-5.9-2.9zM244.8 8C106.1 8 0 113.3 0 252c0 110.9 69.8 205.8 169.5 239.2 12.8 2.3 17.3-5.6 17.3-12.1 0-6.2-.3-40.4-.3-61.4 0 0-70 15-84.7-29.8 0 0-11.4-29.1-27.8-36.6 0 0-22.9-15.7 1.6-15.4 0 0 24.9 2 38.6 25.8 21.9 38.6 58.6 27.5 72.9 20.9 2.3-16 8.8-27.1 16-33.7-55.9-6.2-112.3-14.3-112.3-110.5 0-27.5 7.6-41.3 23.6-58.9-2.6-6.5-11.1-33.3 2.6-67.9 20.9-6.5 69 27 69 27 20-5.6 41.5-8.5 62.8-8.5s42.8 2.9 62.8 8.5c0 0 48.1-33.6 69-27 13.7 34.7 5.2 61.4 2.6 67.9 16 17.7 25.8 31.5 25.8 58.9 0 96.5-58.9 104.2-114.8 110.5 9.2 7.9 17 22.9 17 46.4 0 33.7-.3 75.4-.3 83.6 0 6.5 4.6 14.4 17.3 12.1C428.2 457.8 496 362.9 496 252 496 113.3 383.5 8 244.8 8zM97.2 352.9c-1.3 1-1 3.3.7 5.2 1.6 1.6 3.9 2.3 5.2 1 1.3-1 1-3.3-.7-5.2-1.6-1.6-3.9-2.3-5.2-1zm-10.8-8.1c-.7 1.3.3 2.9 2.3 3.9 1.6 1 3.6.7 4.3-.7.7-1.3-.3-2.9-2.3-3.9-2-.6-3.6-.3-4.3.7zm32.4 35.6c-1.6 1.3-1 4.3 1.3 6.2 2.3 2.3 5.2 2.6 6.5 1 1.3-1.3.7-4.3-1.3-6.2-2.2-2.3-5.2-2.6-6.5-1zm-11.4-14.7c-1.6 1-1.6 3.6 0 5.9 1.6 2.3 4.3 3.3 5.6 2.3 1.6-1.3 1.6-3.9 0-6.2-1.4-2.3-4-3.3-5.6-2z"></path></svg><!-- <i class="fab fa-github"></i> Font Awesome fontawesome.com -->
+ </a>
+ </div>
+ <div class="columns is-centered">
+ <div class="column is-8">
+ <div class="content">
+ <p>
+ This website is licensed under a <a rel="license" href="http://creativecommons.org/licenses/by-sa/4.0/">Creative
+ Commons Attribution-ShareAlike 4.0 International License</a>.
+ </p>
+ <p>
+ This means you are free to borrow the <a href="https://github.com/nerfies/nerfies.github.io">source code</a> of this website;
+ we just ask that you link back to this page in the footer.
+ Please remember to remove the analytics code included in the header of the website, which
+ you do not want on your own site.
+ </p>
+ </div>
+ </div>
+ </div>
+ </div>
+ </footer>
+
+ </body>
+ </html>
+
docs/static/.DS_Store ADDED
Binary file (6.15 kB).
 
docs/static/css/bulma-carousel.min.css ADDED
@@ -0,0 +1 @@
+ @-webkit-keyframes spinAround{from{-webkit-transform:rotate(0);transform:rotate(0)}to{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}@keyframes spinAround{from{-webkit-transform:rotate(0);transform:rotate(0)}to{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}.slider{position:relative;width:100%}.slider-container{display:flex;flex-wrap:nowrap;flex-direction:row;overflow:hidden;-webkit-transform:translate3d(0,0,0);transform:translate3d(0,0,0);min-height:100%}.slider-container.is-vertical{flex-direction:column}.slider-container .slider-item{flex:none}.slider-container .slider-item .image.is-covered img{-o-object-fit:cover;object-fit:cover;-o-object-position:center center;object-position:center center;height:100%;width:100%}.slider-container .slider-item .video-container{height:0;padding-bottom:0;padding-top:56.25%;margin:0;position:relative}.slider-container .slider-item .video-container.is-1by1,.slider-container .slider-item .video-container.is-square{padding-top:100%}.slider-container .slider-item .video-container.is-4by3{padding-top:75%}.slider-container .slider-item .video-container.is-21by9{padding-top:42.857143%}.slider-container .slider-item .video-container embed,.slider-container .slider-item .video-container iframe,.slider-container .slider-item .video-container object{position:absolute;top:0;left:0;width:100%!important;height:100%!important}.slider-navigation-next,.slider-navigation-previous{display:flex;justify-content:center;align-items:center;position:absolute;width:42px;height:42px;background:#fff center center no-repeat;background-size:20px 20px;border:1px solid #fff;border-radius:25091983px;box-shadow:0 2px 5px #3232321a;top:50%;margin-top:-20px;left:0;cursor:pointer;transition:opacity .3s,-webkit-transform .3s;transition:transform .3s,opacity .3s;transition:transform .3s,opacity .3s,-webkit-transform .3s}.slider-navigation-next:hover,.slider-navigation-previous:hover{-webkit-transform:scale(1.2);transform:scale(1.2)}.slider-navigation-next.is-hidden,.slider-navigation-previous.is-hidden{display:none;opacity:0}.slider-navigation-next svg,.slider-navigation-previous svg{width:25%}.slider-navigation-next{left:auto;right:0;background:#fff center center no-repeat;background-size:20px 20px}.slider-pagination{display:none;justify-content:center;align-items:center;position:absolute;bottom:0;left:0;right:0;padding:.5rem 1rem;text-align:center}.slider-pagination .slider-page{background:#fff;width:10px;height:10px;border-radius:25091983px;display:inline-block;margin:0 3px;box-shadow:0 2px 5px #3232321a;transition:-webkit-transform .3s;transition:transform .3s;transition:transform .3s,-webkit-transform .3s;cursor:pointer}.slider-pagination .slider-page.is-active,.slider-pagination .slider-page:hover{-webkit-transform:scale(1.4);transform:scale(1.4)}@media screen and (min-width:800px){.slider-pagination{display:flex}}.hero.has-carousel{position:relative}.hero.has-carousel+.hero-body,.hero.has-carousel+.hero-footer,.hero.has-carousel+.hero-head{z-index:10;overflow:hidden}.hero.has-carousel .hero-carousel{position:absolute;top:0;left:0;bottom:0;right:0;height:auto;border:none;margin:auto;padding:0;z-index:0}.hero.has-carousel .hero-carousel .slider{width:100%;max-width:100%;overflow:hidden;height:100%!important;max-height:100%;z-index:0}.hero.has-carousel .hero-carousel .slider .has-background{max-height:100%}.hero.has-carousel .hero-carousel .slider .has-background .is-background{-o-object-fit:cover;object-fit:cover;-o-object-position:center 
center;object-position:center center;height:100%;width:100%}.hero.has-carousel .hero-body{margin:0 3rem;z-index:10}
docs/static/css/bulma-slider.min.css ADDED
@@ -0,0 +1 @@
+ @-webkit-keyframes spinAround{from{-webkit-transform:rotate(0);transform:rotate(0)}to{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}@keyframes spinAround{from{-webkit-transform:rotate(0);transform:rotate(0)}to{-webkit-transform:rotate(359deg);transform:rotate(359deg)}}input[type=range].slider{-webkit-appearance:none;-moz-appearance:none;appearance:none;margin:1rem 0;background:0 0;touch-action:none}input[type=range].slider.is-fullwidth{display:block;width:100%}input[type=range].slider:focus{outline:0}input[type=range].slider:not([orient=vertical])::-webkit-slider-runnable-track{width:100%}input[type=range].slider:not([orient=vertical])::-moz-range-track{width:100%}input[type=range].slider:not([orient=vertical])::-ms-track{width:100%}input[type=range].slider:not([orient=vertical]).has-output+output,input[type=range].slider:not([orient=vertical]).has-output-tooltip+output{width:3rem;background:#4a4a4a;border-radius:4px;padding:.4rem .8rem;font-size:.75rem;line-height:.75rem;text-align:center;text-overflow:ellipsis;white-space:nowrap;color:#fff;overflow:hidden;pointer-events:none;z-index:200}input[type=range].slider:not([orient=vertical]).has-output-tooltip:disabled+output,input[type=range].slider:not([orient=vertical]).has-output:disabled+output{opacity:.5}input[type=range].slider:not([orient=vertical]).has-output{display:inline-block;vertical-align:middle;width:calc(100% - (4.2rem))}input[type=range].slider:not([orient=vertical]).has-output+output{display:inline-block;margin-left:.75rem;vertical-align:middle}input[type=range].slider:not([orient=vertical]).has-output-tooltip{display:block}input[type=range].slider:not([orient=vertical]).has-output-tooltip+output{position:absolute;left:0;top:-.1rem}input[type=range].slider[orient=vertical]{-webkit-appearance:slider-vertical;-moz-appearance:slider-vertical;appearance:slider-vertical;-webkit-writing-mode:bt-lr;-ms-writing-mode:bt-lr;writing-mode:bt-lr}input[type=range].slider[orient=vertical]::-webkit-slider-runnable-track{height:100%}input[type=range].slider[orient=vertical]::-moz-range-track{height:100%}input[type=range].slider[orient=vertical]::-ms-track{height:100%}input[type=range].slider::-webkit-slider-runnable-track{cursor:pointer;animate:.2s;box-shadow:0 0 0 #7a7a7a;background:#dbdbdb;border-radius:4px;border:0 solid #7a7a7a}input[type=range].slider::-moz-range-track{cursor:pointer;animate:.2s;box-shadow:0 0 0 #7a7a7a;background:#dbdbdb;border-radius:4px;border:0 solid #7a7a7a}input[type=range].slider::-ms-track{cursor:pointer;animate:.2s;box-shadow:0 0 0 #7a7a7a;background:#dbdbdb;border-radius:4px;border:0 solid #7a7a7a}input[type=range].slider::-ms-fill-lower{background:#dbdbdb;border-radius:4px}input[type=range].slider::-ms-fill-upper{background:#dbdbdb;border-radius:4px}input[type=range].slider::-webkit-slider-thumb{box-shadow:none;border:1px solid #b5b5b5;border-radius:4px;background:#fff;cursor:pointer}input[type=range].slider::-moz-range-thumb{box-shadow:none;border:1px solid #b5b5b5;border-radius:4px;background:#fff;cursor:pointer}input[type=range].slider::-ms-thumb{box-shadow:none;border:1px solid 
#b5b5b5;border-radius:4px;background:#fff;cursor:pointer}input[type=range].slider::-webkit-slider-thumb{-webkit-appearance:none;appearance:none}input[type=range].slider.is-circle::-webkit-slider-thumb{border-radius:290486px}input[type=range].slider.is-circle::-moz-range-thumb{border-radius:290486px}input[type=range].slider.is-circle::-ms-thumb{border-radius:290486px}input[type=range].slider:active::-webkit-slider-thumb{-webkit-transform:scale(1.25);transform:scale(1.25)}input[type=range].slider:active::-moz-range-thumb{transform:scale(1.25)}input[type=range].slider:active::-ms-thumb{transform:scale(1.25)}input[type=range].slider:disabled{opacity:.5;cursor:not-allowed}input[type=range].slider:disabled::-webkit-slider-thumb{cursor:not-allowed;-webkit-transform:scale(1);transform:scale(1)}input[type=range].slider:disabled::-moz-range-thumb{cursor:not-allowed;transform:scale(1)}input[type=range].slider:disabled::-ms-thumb{cursor:not-allowed;transform:scale(1)}input[type=range].slider:not([orient=vertical]){min-height:calc((1rem + 2px) * 1.25)}input[type=range].slider:not([orient=vertical])::-webkit-slider-runnable-track{height:.5rem}input[type=range].slider:not([orient=vertical])::-moz-range-track{height:.5rem}input[type=range].slider:not([orient=vertical])::-ms-track{height:.5rem}input[type=range].slider[orient=vertical]::-webkit-slider-runnable-track{width:.5rem}input[type=range].slider[orient=vertical]::-moz-range-track{width:.5rem}input[type=range].slider[orient=vertical]::-ms-track{width:.5rem}input[type=range].slider::-webkit-slider-thumb{height:1rem;width:1rem}input[type=range].slider::-moz-range-thumb{height:1rem;width:1rem}input[type=range].slider::-ms-thumb{height:1rem;width:1rem}input[type=range].slider::-ms-thumb{margin-top:0}input[type=range].slider::-webkit-slider-thumb{margin-top:-.25rem}input[type=range].slider[orient=vertical]::-webkit-slider-thumb{margin-top:auto;margin-left:-.25rem}input[type=range].slider.is-small:not([orient=vertical]){min-height:calc((.75rem + 2px) * 1.25)}input[type=range].slider.is-small:not([orient=vertical])::-webkit-slider-runnable-track{height:.375rem}input[type=range].slider.is-small:not([orient=vertical])::-moz-range-track{height:.375rem}input[type=range].slider.is-small:not([orient=vertical])::-ms-track{height:.375rem}input[type=range].slider.is-small[orient=vertical]::-webkit-slider-runnable-track{width:.375rem}input[type=range].slider.is-small[orient=vertical]::-moz-range-track{width:.375rem}input[type=range].slider.is-small[orient=vertical]::-ms-track{width:.375rem}input[type=range].slider.is-small::-webkit-slider-thumb{height:.75rem;width:.75rem}input[type=range].slider.is-small::-moz-range-thumb{height:.75rem;width:.75rem}input[type=range].slider.is-small::-ms-thumb{height:.75rem;width:.75rem}input[type=range].slider.is-small::-ms-thumb{margin-top:0}input[type=range].slider.is-small::-webkit-slider-thumb{margin-top:-.1875rem}input[type=range].slider.is-small[orient=vertical]::-webkit-slider-thumb{margin-top:auto;margin-left:-.1875rem}input[type=range].slider.is-medium:not([orient=vertical]){min-height:calc((1.25rem + 2px) * 
1.25)}input[type=range].slider.is-medium:not([orient=vertical])::-webkit-slider-runnable-track{height:.625rem}input[type=range].slider.is-medium:not([orient=vertical])::-moz-range-track{height:.625rem}input[type=range].slider.is-medium:not([orient=vertical])::-ms-track{height:.625rem}input[type=range].slider.is-medium[orient=vertical]::-webkit-slider-runnable-track{width:.625rem}input[type=range].slider.is-medium[orient=vertical]::-moz-range-track{width:.625rem}input[type=range].slider.is-medium[orient=vertical]::-ms-track{width:.625rem}input[type=range].slider.is-medium::-webkit-slider-thumb{height:1.25rem;width:1.25rem}input[type=range].slider.is-medium::-moz-range-thumb{height:1.25rem;width:1.25rem}input[type=range].slider.is-medium::-ms-thumb{height:1.25rem;width:1.25rem}input[type=range].slider.is-medium::-ms-thumb{margin-top:0}input[type=range].slider.is-medium::-webkit-slider-thumb{margin-top:-.3125rem}input[type=range].slider.is-medium[orient=vertical]::-webkit-slider-thumb{margin-top:auto;margin-left:-.3125rem}input[type=range].slider.is-large:not([orient=vertical]){min-height:calc((1.5rem + 2px) * 1.25)}input[type=range].slider.is-large:not([orient=vertical])::-webkit-slider-runnable-track{height:.75rem}input[type=range].slider.is-large:not([orient=vertical])::-moz-range-track{height:.75rem}input[type=range].slider.is-large:not([orient=vertical])::-ms-track{height:.75rem}input[type=range].slider.is-large[orient=vertical]::-webkit-slider-runnable-track{width:.75rem}input[type=range].slider.is-large[orient=vertical]::-moz-range-track{width:.75rem}input[type=range].slider.is-large[orient=vertical]::-ms-track{width:.75rem}input[type=range].slider.is-large::-webkit-slider-thumb{height:1.5rem;width:1.5rem}input[type=range].slider.is-large::-moz-range-thumb{height:1.5rem;width:1.5rem}input[type=range].slider.is-large::-ms-thumb{height:1.5rem;width:1.5rem}input[type=range].slider.is-large::-ms-thumb{margin-top:0}input[type=range].slider.is-large::-webkit-slider-thumb{margin-top:-.375rem}input[type=range].slider.is-large[orient=vertical]::-webkit-slider-thumb{margin-top:auto;margin-left:-.375rem}input[type=range].slider.is-white::-moz-range-track{background:#fff!important}input[type=range].slider.is-white::-webkit-slider-runnable-track{background:#fff!important}input[type=range].slider.is-white::-ms-track{background:#fff!important}input[type=range].slider.is-white::-ms-fill-lower{background:#fff}input[type=range].slider.is-white::-ms-fill-upper{background:#fff}input[type=range].slider.is-white .has-output-tooltip+output,input[type=range].slider.is-white.has-output+output{background-color:#fff;color:#0a0a0a}input[type=range].slider.is-black::-moz-range-track{background:#0a0a0a!important}input[type=range].slider.is-black::-webkit-slider-runnable-track{background:#0a0a0a!important}input[type=range].slider.is-black::-ms-track{background:#0a0a0a!important}input[type=range].slider.is-black::-ms-fill-lower{background:#0a0a0a}input[type=range].slider.is-black::-ms-fill-upper{background:#0a0a0a}input[type=range].slider.is-black 
.has-output-tooltip+output,input[type=range].slider.is-black.has-output+output{background-color:#0a0a0a;color:#fff}input[type=range].slider.is-light::-moz-range-track{background:#f5f5f5!important}input[type=range].slider.is-light::-webkit-slider-runnable-track{background:#f5f5f5!important}input[type=range].slider.is-light::-ms-track{background:#f5f5f5!important}input[type=range].slider.is-light::-ms-fill-lower{background:#f5f5f5}input[type=range].slider.is-light::-ms-fill-upper{background:#f5f5f5}input[type=range].slider.is-light .has-output-tooltip+output,input[type=range].slider.is-light.has-output+output{background-color:#f5f5f5;color:#363636}input[type=range].slider.is-dark::-moz-range-track{background:#363636!important}input[type=range].slider.is-dark::-webkit-slider-runnable-track{background:#363636!important}input[type=range].slider.is-dark::-ms-track{background:#363636!important}input[type=range].slider.is-dark::-ms-fill-lower{background:#363636}input[type=range].slider.is-dark::-ms-fill-upper{background:#363636}input[type=range].slider.is-dark .has-output-tooltip+output,input[type=range].slider.is-dark.has-output+output{background-color:#363636;color:#f5f5f5}input[type=range].slider.is-primary::-moz-range-track{background:#00d1b2!important}input[type=range].slider.is-primary::-webkit-slider-runnable-track{background:#00d1b2!important}input[type=range].slider.is-primary::-ms-track{background:#00d1b2!important}input[type=range].slider.is-primary::-ms-fill-lower{background:#00d1b2}input[type=range].slider.is-primary::-ms-fill-upper{background:#00d1b2}input[type=range].slider.is-primary .has-output-tooltip+output,input[type=range].slider.is-primary.has-output+output{background-color:#00d1b2;color:#fff}input[type=range].slider.is-link::-moz-range-track{background:#3273dc!important}input[type=range].slider.is-link::-webkit-slider-runnable-track{background:#3273dc!important}input[type=range].slider.is-link::-ms-track{background:#3273dc!important}input[type=range].slider.is-link::-ms-fill-lower{background:#3273dc}input[type=range].slider.is-link::-ms-fill-upper{background:#3273dc}input[type=range].slider.is-link .has-output-tooltip+output,input[type=range].slider.is-link.has-output+output{background-color:#3273dc;color:#fff}input[type=range].slider.is-info::-moz-range-track{background:#209cee!important}input[type=range].slider.is-info::-webkit-slider-runnable-track{background:#209cee!important}input[type=range].slider.is-info::-ms-track{background:#209cee!important}input[type=range].slider.is-info::-ms-fill-lower{background:#209cee}input[type=range].slider.is-info::-ms-fill-upper{background:#209cee}input[type=range].slider.is-info .has-output-tooltip+output,input[type=range].slider.is-info.has-output+output{background-color:#209cee;color:#fff}input[type=range].slider.is-success::-moz-range-track{background:#23d160!important}input[type=range].slider.is-success::-webkit-slider-runnable-track{background:#23d160!important}input[type=range].slider.is-success::-ms-track{background:#23d160!important}input[type=range].slider.is-success::-ms-fill-lower{background:#23d160}input[type=range].slider.is-success::-ms-fill-upper{background:#23d160}input[type=range].slider.is-success 
.has-output-tooltip+output,input[type=range].slider.is-success.has-output+output{background-color:#23d160;color:#fff}input[type=range].slider.is-warning::-moz-range-track{background:#ffdd57!important}input[type=range].slider.is-warning::-webkit-slider-runnable-track{background:#ffdd57!important}input[type=range].slider.is-warning::-ms-track{background:#ffdd57!important}input[type=range].slider.is-warning::-ms-fill-lower{background:#ffdd57}input[type=range].slider.is-warning::-ms-fill-upper{background:#ffdd57}input[type=range].slider.is-warning .has-output-tooltip+output,input[type=range].slider.is-warning.has-output+output{background-color:#ffdd57;color:rgba(0,0,0,.7)}input[type=range].slider.is-danger::-moz-range-track{background:#ff3860!important}input[type=range].slider.is-danger::-webkit-slider-runnable-track{background:#ff3860!important}input[type=range].slider.is-danger::-ms-track{background:#ff3860!important}input[type=range].slider.is-danger::-ms-fill-lower{background:#ff3860}input[type=range].slider.is-danger::-ms-fill-upper{background:#ff3860}input[type=range].slider.is-danger .has-output-tooltip+output,input[type=range].slider.is-danger.has-output+output{background-color:#ff3860;color:#fff}
docs/static/css/bulma.min.css ADDED
The diff for this file is too large to render.
 
docs/static/css/fontawesome.all.min.css ADDED
@@ -0,0 +1,5 @@
+ /*!
+ * Font Awesome Free 5.15.1 by @fontawesome - https://fontawesome.com
+ * License - https://fontawesome.com/license/free (Icons: CC BY 4.0, Fonts: SIL OFL 1.1, Code: MIT License)
+ */
+ .fa,.fab,.fad,.fal,.far,.fas{-moz-osx-font-smoothing:grayscale;-webkit-font-smoothing:antialiased;display:inline-block;font-style:normal;font-variant:normal;text-rendering:auto;line-height:1}.fa-lg{font-size:1.33333em;line-height:.75em;vertical-align:-.0667em}.fa-xs{font-size:.75em}.fa-sm{font-size:.875em}.fa-1x{font-size:1em}.fa-2x{font-size:2em}.fa-3x{font-size:3em}.fa-4x{font-size:4em}.fa-5x{font-size:5em}.fa-6x{font-size:6em}.fa-7x{font-size:7em}.fa-8x{font-size:8em}.fa-9x{font-size:9em}.fa-10x{font-size:10em}.fa-fw{text-align:center;width:1.25em}.fa-ul{list-style-type:none;margin-left:2.5em;padding-left:0}.fa-ul>li{position:relative}.fa-li{left:-2em;position:absolute;text-align:center;width:2em;line-height:inherit}.fa-border{border:.08em solid #eee;border-radius:.1em;padding:.2em .25em .15em}.fa-pull-left{float:left}.fa-pull-right{float:right}.fa.fa-pull-left,.fab.fa-pull-left,.fal.fa-pull-left,.far.fa-pull-left,.fas.fa-pull-left{margin-right:.3em}.fa.fa-pull-right,.fab.fa-pull-right,.fal.fa-pull-right,.far.fa-pull-right,.fas.fa-pull-right{margin-left:.3em}.fa-spin{-webkit-animation:fa-spin 2s linear infinite;animation:fa-spin 2s linear infinite}.fa-pulse{-webkit-animation:fa-spin 1s steps(8) infinite;animation:fa-spin 1s steps(8) infinite}@-webkit-keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}to{-webkit-transform:rotate(1turn);transform:rotate(1turn)}}@keyframes fa-spin{0%{-webkit-transform:rotate(0deg);transform:rotate(0deg)}to{-webkit-transform:rotate(1turn);transform:rotate(1turn)}}.fa-rotate-90{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=1)";-webkit-transform:rotate(90deg);transform:rotate(90deg)}.fa-rotate-180{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2)";-webkit-transform:rotate(180deg);transform:rotate(180deg)}.fa-rotate-270{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=3)";-webkit-transform:rotate(270deg);transform:rotate(270deg)}.fa-flip-horizontal{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=0, mirror=1)";-webkit-transform:scaleX(-1);transform:scaleX(-1)}.fa-flip-vertical{-webkit-transform:scaleY(-1);transform:scaleY(-1)}.fa-flip-both,.fa-flip-horizontal.fa-flip-vertical,.fa-flip-vertical{-ms-filter:"progid:DXImageTransform.Microsoft.BasicImage(rotation=2, mirror=1)"}.fa-flip-both,.fa-flip-horizontal.fa-flip-vertical{-webkit-transform:scale(-1);transform:scale(-1)}:root .fa-flip-both,:root .fa-flip-horizontal,:root .fa-flip-vertical,:root .fa-rotate-90,:root .fa-rotate-180,:root 
.fa-rotate-270{-webkit-filter:none;filter:none}.fa-stack{display:inline-block;height:2em;line-height:2em;position:relative;vertical-align:middle;width:2.5em}.fa-stack-1x,.fa-stack-2x{left:0;position:absolute;text-align:center;width:100%}.fa-stack-1x{line-height:inherit}.fa-stack-2x{font-size:2em}.fa-inverse{color:#fff}.fa-500px:before{content:"\f26e"}.fa-accessible-icon:before{content:"\f368"}.fa-accusoft:before{content:"\f369"}.fa-acquisitions-incorporated:before{content:"\f6af"}.fa-ad:before{content:"\f641"}.fa-address-book:before{content:"\f2b9"}.fa-address-card:before{content:"\f2bb"}.fa-adjust:before{content:"\f042"}.fa-adn:before{content:"\f170"}.fa-adversal:before{content:"\f36a"}.fa-affiliatetheme:before{content:"\f36b"}.fa-air-freshener:before{content:"\f5d0"}.fa-airbnb:before{content:"\f834"}.fa-algolia:before{content:"\f36c"}.fa-align-center:before{content:"\f037"}.fa-align-justify:before{content:"\f039"}.fa-align-left:before{content:"\f036"}.fa-align-right:before{content:"\f038"}.fa-alipay:before{content:"\f642"}.fa-allergies:before{content:"\f461"}.fa-amazon:before{content:"\f270"}.fa-amazon-pay:before{content:"\f42c"}.fa-ambulance:before{content:"\f0f9"}.fa-american-sign-language-interpreting:before{content:"\f2a3"}.fa-amilia:before{content:"\f36d"}.fa-anchor:before{content:"\f13d"}.fa-android:before{content:"\f17b"}.fa-angellist:before{content:"\f209"}.fa-angle-double-down:before{content:"\f103"}.fa-angle-double-left:before{content:"\f100"}.fa-angle-double-right:before{content:"\f101"}.fa-angle-double-up:before{content:"\f102"}.fa-angle-down:before{content:"\f107"}.fa-angle-left:before{content:"\f104"}.fa-angle-right:before{content:"\f105"}.fa-angle-up:before{content:"\f106"}.fa-angry:before{content:"\f556"}.fa-angrycreative:before{content:"\f36e"}.fa-angular:before{content:"\f420"}.fa-ankh:before{content:"\f644"}.fa-app-store:before{content:"\f36f"}.fa-app-store-ios:before{content:"\f370"}.fa-apper:before{content:"\f371"}.fa-apple:before{content:"\f179"}.fa-apple-alt:before{content:"\f5d1"}.fa-apple-pay:before{content:"\f415"}.fa-archive:before{content:"\f187"}.fa-archway:before{content:"\f557"}.fa-arrow-alt-circle-down:before{content:"\f358"}.fa-arrow-alt-circle-left:before{content:"\f359"}.fa-arrow-alt-circle-right:before{content:"\f35a"}.fa-arrow-alt-circle-up:before{content:"\f35b"}.fa-arrow-circle-down:before{content:"\f0ab"}.fa-arrow-circle-left:before{content:"\f0a8"}.fa-arrow-circle-right:before{content:"\f0a9"}.fa-arrow-circle-up:before{content:"\f0aa"}.fa-arrow-down:before{content:"\f063"}.fa-arrow-left:before{content:"\f060"}.fa-arrow-right:before{content:"\f061"}.fa-arrow-up:before{content:"\f062"}.fa-arrows-alt:before{content:"\f0b2"}.fa-arrows-alt-h:before{content:"\f337"}.fa-arrows-alt-v:before{content:"\f338"}.fa-artstation:before{content:"\f77a"}.fa-assistive-listening-systems:before{content:"\f2a2"}.fa-asterisk:before{content:"\f069"}.fa-asymmetrik:before{content:"\f372"}.fa-at:before{content:"\f1fa"}.fa-atlas:before{content:"\f558"}.fa-atlassian:before{content:"\f77b"}.fa-atom:before{content:"\f5d2"}.fa-audible:before{content:"\f373"}.fa-audio-description:before{content:"\f29e"}.fa-autoprefixer:before{content:"\f41c"}.fa-avianex:before{content:"\f374"}.fa-aviato:before{content:"\f421"}.fa-award:before{content:"\f559"}.fa-aws:before{content:"\f375"}.fa-baby:before{content:"\f77c"}.fa-baby-carriage:before{content:"\f77d"}.fa-backspace:before{content:"\f55a"}.fa-backward:before{content:"\f04a"}.fa-bacon:before{content:"\f7e5"}.fa-bacteria:before{content:"\e05
9"}.fa-bacterium:before{content:"\e05a"}.fa-bahai:before{content:"\f666"}.fa-balance-scale:before{content:"\f24e"}.fa-balance-scale-left:before{content:"\f515"}.fa-balance-scale-right:before{content:"\f516"}.fa-ban:before{content:"\f05e"}.fa-band-aid:before{content:"\f462"}.fa-bandcamp:before{content:"\f2d5"}.fa-barcode:before{content:"\f02a"}.fa-bars:before{content:"\f0c9"}.fa-baseball-ball:before{content:"\f433"}.fa-basketball-ball:before{content:"\f434"}.fa-bath:before{content:"\f2cd"}.fa-battery-empty:before{content:"\f244"}.fa-battery-full:before{content:"\f240"}.fa-battery-half:before{content:"\f242"}.fa-battery-quarter:before{content:"\f243"}.fa-battery-three-quarters:before{content:"\f241"}.fa-battle-net:before{content:"\f835"}.fa-bed:before{content:"\f236"}.fa-beer:before{content:"\f0fc"}.fa-behance:before{content:"\f1b4"}.fa-behance-square:before{content:"\f1b5"}.fa-bell:before{content:"\f0f3"}.fa-bell-slash:before{content:"\f1f6"}.fa-bezier-curve:before{content:"\f55b"}.fa-bible:before{content:"\f647"}.fa-bicycle:before{content:"\f206"}.fa-biking:before{content:"\f84a"}.fa-bimobject:before{content:"\f378"}.fa-binoculars:before{content:"\f1e5"}.fa-biohazard:before{content:"\f780"}.fa-birthday-cake:before{content:"\f1fd"}.fa-bitbucket:before{content:"\f171"}.fa-bitcoin:before{content:"\f379"}.fa-bity:before{content:"\f37a"}.fa-black-tie:before{content:"\f27e"}.fa-blackberry:before{content:"\f37b"}.fa-blender:before{content:"\f517"}.fa-blender-phone:before{content:"\f6b6"}.fa-blind:before{content:"\f29d"}.fa-blog:before{content:"\f781"}.fa-blogger:before{content:"\f37c"}.fa-blogger-b:before{content:"\f37d"}.fa-bluetooth:before{content:"\f293"}.fa-bluetooth-b:before{content:"\f294"}.fa-bold:before{content:"\f032"}.fa-bolt:before{content:"\f0e7"}.fa-bomb:before{content:"\f1e2"}.fa-bone:before{content:"\f5d7"}.fa-bong:before{content:"\f55c"}.fa-book:before{content:"\f02d"}.fa-book-dead:before{content:"\f6b7"}.fa-book-medical:before{content:"\f7e6"}.fa-book-open:before{content:"\f518"}.fa-book-reader:before{content:"\f5da"}.fa-bookmark:before{content:"\f02e"}.fa-bootstrap:before{content:"\f836"}.fa-border-all:before{content:"\f84c"}.fa-border-none:before{content:"\f850"}.fa-border-style:before{content:"\f853"}.fa-bowling-ball:before{content:"\f436"}.fa-box:before{content:"\f466"}.fa-box-open:before{content:"\f49e"}.fa-box-tissue:before{content:"\e05b"}.fa-boxes:before{content:"\f468"}.fa-braille:before{content:"\f2a1"}.fa-brain:before{content:"\f5dc"}.fa-bread-slice:before{content:"\f7ec"}.fa-briefcase:before{content:"\f0b1"}.fa-briefcase-medical:before{content:"\f469"}.fa-broadcast-tower:before{content:"\f519"}.fa-broom:before{content:"\f51a"}.fa-brush:before{content:"\f55d"}.fa-btc:before{content:"\f15a"}.fa-buffer:before{content:"\f837"}.fa-bug:before{content:"\f188"}.fa-building:before{content:"\f1ad"}.fa-bullhorn:before{content:"\f0a1"}.fa-bullseye:before{content:"\f140"}.fa-burn:before{content:"\f46a"}.fa-buromobelexperte:before{content:"\f37f"}.fa-bus:before{content:"\f207"}.fa-bus-alt:before{content:"\f55e"}.fa-business-time:before{content:"\f64a"}.fa-buy-n-large:before{content:"\f8a6"}.fa-buysellads:before{content:"\f20d"}.fa-calculator:before{content:"\f1ec"}.fa-calendar:before{content:"\f133"}.fa-calendar-alt:before{content:"\f073"}.fa-calendar-check:before{content:"\f274"}.fa-calendar-day:before{content:"\f783"}.fa-calendar-minus:before{content:"\f272"}.fa-calendar-plus:before{content:"\f271"}.fa-calendar-times:before{content:"\f273"}.fa-calendar-week:before{content:"\f784"}
.fa-camera:before{content:"\f030"}.fa-camera-retro:before{content:"\f083"}.fa-campground:before{content:"\f6bb"}.fa-canadian-maple-leaf:before{content:"\f785"}.fa-candy-cane:before{content:"\f786"}.fa-cannabis:before{content:"\f55f"}.fa-capsules:before{content:"\f46b"}.fa-car:before{content:"\f1b9"}.fa-car-alt:before{content:"\f5de"}.fa-car-battery:before{content:"\f5df"}.fa-car-crash:before{content:"\f5e1"}.fa-car-side:before{content:"\f5e4"}.fa-caravan:before{content:"\f8ff"}.fa-caret-down:before{content:"\f0d7"}.fa-caret-left:before{content:"\f0d9"}.fa-caret-right:before{content:"\f0da"}.fa-caret-square-down:before{content:"\f150"}.fa-caret-square-left:before{content:"\f191"}.fa-caret-square-right:before{content:"\f152"}.fa-caret-square-up:before{content:"\f151"}.fa-caret-up:before{content:"\f0d8"}.fa-carrot:before{content:"\f787"}.fa-cart-arrow-down:before{content:"\f218"}.fa-cart-plus:before{content:"\f217"}.fa-cash-register:before{content:"\f788"}.fa-cat:before{content:"\f6be"}.fa-cc-amazon-pay:before{content:"\f42d"}.fa-cc-amex:before{content:"\f1f3"}.fa-cc-apple-pay:before{content:"\f416"}.fa-cc-diners-club:before{content:"\f24c"}.fa-cc-discover:before{content:"\f1f2"}.fa-cc-jcb:before{content:"\f24b"}.fa-cc-mastercard:before{content:"\f1f1"}.fa-cc-paypal:before{content:"\f1f4"}.fa-cc-stripe:before{content:"\f1f5"}.fa-cc-visa:before{content:"\f1f0"}.fa-centercode:before{content:"\f380"}.fa-centos:before{content:"\f789"}.fa-certificate:before{content:"\f0a3"}.fa-chair:before{content:"\f6c0"}.fa-chalkboard:before{content:"\f51b"}.fa-chalkboard-teacher:before{content:"\f51c"}.fa-charging-station:before{content:"\f5e7"}.fa-chart-area:before{content:"\f1fe"}.fa-chart-bar:before{content:"\f080"}.fa-chart-line:before{content:"\f201"}.fa-chart-pie:before{content:"\f200"}.fa-check:before{content:"\f00c"}.fa-check-circle:before{content:"\f058"}.fa-check-double:before{content:"\f560"}.fa-check-square:before{content:"\f14a"}.fa-cheese:before{content:"\f7ef"}.fa-chess:before{content:"\f439"}.fa-chess-bishop:before{content:"\f43a"}.fa-chess-board:before{content:"\f43c"}.fa-chess-king:before{content:"\f43f"}.fa-chess-knight:before{content:"\f441"}.fa-chess-pawn:before{content:"\f443"}.fa-chess-queen:before{content:"\f445"}.fa-chess-rook:before{content:"\f447"}.fa-chevron-circle-down:before{content:"\f13a"}.fa-chevron-circle-left:before{content:"\f137"}.fa-chevron-circle-right:before{content:"\f138"}.fa-chevron-circle-up:before{content:"\f139"}.fa-chevron-down:before{content:"\f078"}.fa-chevron-left:before{content:"\f053"}.fa-chevron-right:before{content:"\f054"}.fa-chevron-up:before{content:"\f077"}.fa-child:before{content:"\f1ae"}.fa-chrome:before{content:"\f268"}.fa-chromecast:before{content:"\f838"}.fa-church:before{content:"\f51d"}.fa-circle:before{content:"\f111"}.fa-circle-notch:before{content:"\f1ce"}.fa-city:before{content:"\f64f"}.fa-clinic-medical:before{content:"\f7f2"}.fa-clipboard:before{content:"\f328"}.fa-clipboard-check:before{content:"\f46c"}.fa-clipboard-list:before{content:"\f46d"}.fa-clock:before{content:"\f017"}.fa-clone:before{content:"\f24d"}.fa-closed-captioning:before{content:"\f20a"}.fa-cloud:before{content:"\f0c2"}.fa-cloud-download-alt:before{content:"\f381"}.fa-cloud-meatball:before{content:"\f73b"}.fa-cloud-moon:before{content:"\f6c3"}.fa-cloud-moon-rain:before{content:"\f73c"}.fa-cloud-rain:before{content:"\f73d"}.fa-cloud-showers-heavy:before{content:"\f740"}.fa-cloud-sun:before{content:"\f6c4"}.fa-cloud-sun-rain:before{content:"\f743"}.fa-cloud-upload-alt:before{co
ntent:"\f382"}.fa-cloudflare:before{content:"\e07d"}.fa-cloudscale:before{content:"\f383"}.fa-cloudsmith:before{content:"\f384"}.fa-cloudversify:before{content:"\f385"}.fa-cocktail:before{content:"\f561"}.fa-code:before{content:"\f121"}.fa-code-branch:before{content:"\f126"}.fa-codepen:before{content:"\f1cb"}.fa-codiepie:before{content:"\f284"}.fa-coffee:before{content:"\f0f4"}.fa-cog:before{content:"\f013"}.fa-cogs:before{content:"\f085"}.fa-coins:before{content:"\f51e"}.fa-columns:before{content:"\f0db"}.fa-comment:before{content:"\f075"}.fa-comment-alt:before{content:"\f27a"}.fa-comment-dollar:before{content:"\f651"}.fa-comment-dots:before{content:"\f4ad"}.fa-comment-medical:before{content:"\f7f5"}.fa-comment-slash:before{content:"\f4b3"}.fa-comments:before{content:"\f086"}.fa-comments-dollar:before{content:"\f653"}.fa-compact-disc:before{content:"\f51f"}.fa-compass:before{content:"\f14e"}.fa-compress:before{content:"\f066"}.fa-compress-alt:before{content:"\f422"}.fa-compress-arrows-alt:before{content:"\f78c"}.fa-concierge-bell:before{content:"\f562"}.fa-confluence:before{content:"\f78d"}.fa-connectdevelop:before{content:"\f20e"}.fa-contao:before{content:"\f26d"}.fa-cookie:before{content:"\f563"}.fa-cookie-bite:before{content:"\f564"}.fa-copy:before{content:"\f0c5"}.fa-copyright:before{content:"\f1f9"}.fa-cotton-bureau:before{content:"\f89e"}.fa-couch:before{content:"\f4b8"}.fa-cpanel:before{content:"\f388"}.fa-creative-commons:before{content:"\f25e"}.fa-creative-commons-by:before{content:"\f4e7"}.fa-creative-commons-nc:before{content:"\f4e8"}.fa-creative-commons-nc-eu:before{content:"\f4e9"}.fa-creative-commons-nc-jp:before{content:"\f4ea"}.fa-creative-commons-nd:before{content:"\f4eb"}.fa-creative-commons-pd:before{content:"\f4ec"}.fa-creative-commons-pd-alt:before{content:"\f4ed"}.fa-creative-commons-remix:before{content:"\f4ee"}.fa-creative-commons-sa:before{content:"\f4ef"}.fa-creative-commons-sampling:before{content:"\f4f0"}.fa-creative-commons-sampling-plus:before{content:"\f4f1"}.fa-creative-commons-share:before{content:"\f4f2"}.fa-creative-commons-zero:before{content:"\f4f3"}.fa-credit-card:before{content:"\f09d"}.fa-critical-role:before{content:"\f6c9"}.fa-crop:before{content:"\f125"}.fa-crop-alt:before{content:"\f565"}.fa-cross:before{content:"\f654"}.fa-crosshairs:before{content:"\f05b"}.fa-crow:before{content:"\f520"}.fa-crown:before{content:"\f521"}.fa-crutch:before{content:"\f7f7"}.fa-css3:before{content:"\f13c"}.fa-css3-alt:before{content:"\f38b"}.fa-cube:before{content:"\f1b2"}.fa-cubes:before{content:"\f1b3"}.fa-cut:before{content:"\f0c4"}.fa-cuttlefish:before{content:"\f38c"}.fa-d-and-d:before{content:"\f38d"}.fa-d-and-d-beyond:before{content:"\f6ca"}.fa-dailymotion:before{content:"\e052"}.fa-dashcube:before{content:"\f210"}.fa-database:before{content:"\f1c0"}.fa-deaf:before{content:"\f2a4"}.fa-deezer:before{content:"\e077"}.fa-delicious:before{content:"\f1a5"}.fa-democrat:before{content:"\f747"}.fa-deploydog:before{content:"\f38e"}.fa-deskpro:before{content:"\f38f"}.fa-desktop:before{content:"\f108"}.fa-dev:before{content:"\f6cc"}.fa-deviantart:before{content:"\f1bd"}.fa-dharmachakra:before{content:"\f655"}.fa-dhl:before{content:"\f790"}.fa-diagnoses:before{content:"\f470"}.fa-diaspora:before{content:"\f791"}.fa-dice:before{content:"\f522"}.fa-dice-d20:before{content:"\f6cf"}.fa-dice-d6:before{content:"\f6d1"}.fa-dice-five:before{content:"\f523"}.fa-dice-four:before{content:"\f524"}.fa-dice-one:before{content:"\f525"}.fa-dice-six:before{content:"\f526"}.fa-dice-three:
before{content:"\f527"}.fa-dice-two:before{content:"\f528"}.fa-digg:before{content:"\f1a6"}.fa-digital-ocean:before{content:"\f391"}.fa-digital-tachograph:before{content:"\f566"}.fa-directions:before{content:"\f5eb"}.fa-discord:before{content:"\f392"}.fa-discourse:before{content:"\f393"}.fa-disease:before{content:"\f7fa"}.fa-divide:before{content:"\f529"}.fa-dizzy:before{content:"\f567"}.fa-dna:before{content:"\f471"}.fa-dochub:before{content:"\f394"}.fa-docker:before{content:"\f395"}.fa-dog:before{content:"\f6d3"}.fa-dollar-sign:before{content:"\f155"}.fa-dolly:before{content:"\f472"}.fa-dolly-flatbed:before{content:"\f474"}.fa-donate:before{content:"\f4b9"}.fa-door-closed:before{content:"\f52a"}.fa-door-open:before{content:"\f52b"}.fa-dot-circle:before{content:"\f192"}.fa-dove:before{content:"\f4ba"}.fa-download:before{content:"\f019"}.fa-draft2digital:before{content:"\f396"}.fa-drafting-compass:before{content:"\f568"}.fa-dragon:before{content:"\f6d5"}.fa-draw-polygon:before{content:"\f5ee"}.fa-dribbble:before{content:"\f17d"}.fa-dribbble-square:before{content:"\f397"}.fa-dropbox:before{content:"\f16b"}.fa-drum:before{content:"\f569"}.fa-drum-steelpan:before{content:"\f56a"}.fa-drumstick-bite:before{content:"\f6d7"}.fa-drupal:before{content:"\f1a9"}.fa-dumbbell:before{content:"\f44b"}.fa-dumpster:before{content:"\f793"}.fa-dumpster-fire:before{content:"\f794"}.fa-dungeon:before{content:"\f6d9"}.fa-dyalog:before{content:"\f399"}.fa-earlybirds:before{content:"\f39a"}.fa-ebay:before{content:"\f4f4"}.fa-edge:before{content:"\f282"}.fa-edge-legacy:before{content:"\e078"}.fa-edit:before{content:"\f044"}.fa-egg:before{content:"\f7fb"}.fa-eject:before{content:"\f052"}.fa-elementor:before{content:"\f430"}.fa-ellipsis-h:before{content:"\f141"}.fa-ellipsis-v:before{content:"\f142"}.fa-ello:before{content:"\f5f1"}.fa-ember:before{content:"\f423"}.fa-empire:before{content:"\f1d1"}.fa-envelope:before{content:"\f0e0"}.fa-envelope-open:before{content:"\f2b6"}.fa-envelope-open-text:before{content:"\f658"}.fa-envelope-square:before{content:"\f199"}.fa-envira:before{content:"\f299"}.fa-equals:before{content:"\f52c"}.fa-eraser:before{content:"\f12d"}.fa-erlang:before{content:"\f39d"}.fa-ethereum:before{content:"\f42e"}.fa-ethernet:before{content:"\f796"}.fa-etsy:before{content:"\f2d7"}.fa-euro-sign:before{content:"\f153"}.fa-evernote:before{content:"\f839"}.fa-exchange-alt:before{content:"\f362"}.fa-exclamation:before{content:"\f12a"}.fa-exclamation-circle:before{content:"\f06a"}.fa-exclamation-triangle:before{content:"\f071"}.fa-expand:before{content:"\f065"}.fa-expand-alt:before{content:"\f424"}.fa-expand-arrows-alt:before{content:"\f31e"}.fa-expeditedssl:before{content:"\f23e"}.fa-external-link-alt:before{content:"\f35d"}.fa-external-link-square-alt:before{content:"\f360"}.fa-eye:before{content:"\f06e"}.fa-eye-dropper:before{content:"\f1fb"}.fa-eye-slash:before{content:"\f070"}.fa-facebook:before{content:"\f09a"}.fa-facebook-f:before{content:"\f39e"}.fa-facebook-messenger:before{content:"\f39f"}.fa-facebook-square:before{content:"\f082"}.fa-fan:before{content:"\f863"}.fa-fantasy-flight-games:before{content:"\f6dc"}.fa-fast-backward:before{content:"\f049"}.fa-fast-forward:before{content:"\f050"}.fa-faucet:before{content:"\e005"}.fa-fax:before{content:"\f1ac"}.fa-feather:before{content:"\f52d"}.fa-feather-alt:before{content:"\f56b"}.fa-fedex:before{content:"\f797"}.fa-fedora:before{content:"\f798"}.fa-female:before{content:"\f182"}.fa-fighter-jet:before{content:"\f0fb"}.fa-figma:before{content:"\f799"}.fa-f
ile:before{content:"\f15b"}.fa-file-alt:before{content:"\f15c"}.fa-file-archive:before{content:"\f1c6"}.fa-file-audio:before{content:"\f1c7"}.fa-file-code:before{content:"\f1c9"}.fa-file-contract:before{content:"\f56c"}.fa-file-csv:before{content:"\f6dd"}.fa-file-download:before{content:"\f56d"}.fa-file-excel:before{content:"\f1c3"}.fa-file-export:before{content:"\f56e"}.fa-file-image:before{content:"\f1c5"}.fa-file-import:before{content:"\f56f"}.fa-file-invoice:before{content:"\f570"}.fa-file-invoice-dollar:before{content:"\f571"}.fa-file-medical:before{content:"\f477"}.fa-file-medical-alt:before{content:"\f478"}.fa-file-pdf:before{content:"\f1c1"}.fa-file-powerpoint:before{content:"\f1c4"}.fa-file-prescription:before{content:"\f572"}.fa-file-signature:before{content:"\f573"}.fa-file-upload:before{content:"\f574"}.fa-file-video:before{content:"\f1c8"}.fa-file-word:before{content:"\f1c2"}.fa-fill:before{content:"\f575"}.fa-fill-drip:before{content:"\f576"}.fa-film:before{content:"\f008"}.fa-filter:before{content:"\f0b0"}.fa-fingerprint:before{content:"\f577"}.fa-fire:before{content:"\f06d"}.fa-fire-alt:before{content:"\f7e4"}.fa-fire-extinguisher:before{content:"\f134"}.fa-firefox:before{content:"\f269"}.fa-firefox-browser:before{content:"\e007"}.fa-first-aid:before{content:"\f479"}.fa-first-order:before{content:"\f2b0"}.fa-first-order-alt:before{content:"\f50a"}.fa-firstdraft:before{content:"\f3a1"}.fa-fish:before{content:"\f578"}.fa-fist-raised:before{content:"\f6de"}.fa-flag:before{content:"\f024"}.fa-flag-checkered:before{content:"\f11e"}.fa-flag-usa:before{content:"\f74d"}.fa-flask:before{content:"\f0c3"}.fa-flickr:before{content:"\f16e"}.fa-flipboard:before{content:"\f44d"}.fa-flushed:before{content:"\f579"}.fa-fly:before{content:"\f417"}.fa-folder:before{content:"\f07b"}.fa-folder-minus:before{content:"\f65d"}.fa-folder-open:before{content:"\f07c"}.fa-folder-plus:before{content:"\f65e"}.fa-font:before{content:"\f031"}.fa-font-awesome:before{content:"\f2b4"}.fa-font-awesome-alt:before{content:"\f35c"}.fa-font-awesome-flag:before{content:"\f425"}.fa-font-awesome-logo-full:before{content:"\f4e6"}.fa-fonticons:before{content:"\f280"}.fa-fonticons-fi:before{content:"\f3a2"}.fa-football-ball:before{content:"\f44e"}.fa-fort-awesome:before{content:"\f286"}.fa-fort-awesome-alt:before{content:"\f3a3"}.fa-forumbee:before{content:"\f211"}.fa-forward:before{content:"\f04e"}.fa-foursquare:before{content:"\f180"}.fa-free-code-camp:before{content:"\f2c5"}.fa-freebsd:before{content:"\f3a4"}.fa-frog:before{content:"\f52e"}.fa-frown:before{content:"\f119"}.fa-frown-open:before{content:"\f57a"}.fa-fulcrum:before{content:"\f50b"}.fa-funnel-dollar:before{content:"\f662"}.fa-futbol:before{content:"\f1e3"}.fa-galactic-republic:before{content:"\f50c"}.fa-galactic-senate:before{content:"\f50d"}.fa-gamepad:before{content:"\f11b"}.fa-gas-pump:before{content:"\f52f"}.fa-gavel:before{content:"\f0e3"}.fa-gem:before{content:"\f3a5"}.fa-genderless:before{content:"\f22d"}.fa-get-pocket:before{content:"\f265"}.fa-gg:before{content:"\f260"}.fa-gg-circle:before{content:"\f261"}.fa-ghost:before{content:"\f6e2"}.fa-gift:before{content:"\f06b"}.fa-gifts:before{content:"\f79c"}.fa-git:before{content:"\f1d3"}.fa-git-alt:before{content:"\f841"}.fa-git-square:before{content:"\f1d2"}.fa-github:before{content:"\f09b"}.fa-github-alt:before{content:"\f113"}.fa-github-square:before{content:"\f092"}.fa-gitkraken:before{content:"\f3a6"}.fa-gitlab:before{content:"\f296"}.fa-gitter:before{content:"\f426"}.fa-glass-cheers:before{content
:"\f79f"}.fa-glass-martini:before{content:"\f000"}.fa-glass-martini-alt:before{content:"\f57b"}.fa-glass-whiskey:before{content:"\f7a0"}.fa-glasses:before{content:"\f530"}.fa-glide:before{content:"\f2a5"}.fa-glide-g:before{content:"\f2a6"}.fa-globe:before{content:"\f0ac"}.fa-globe-africa:before{content:"\f57c"}.fa-globe-americas:before{content:"\f57d"}.fa-globe-asia:before{content:"\f57e"}.fa-globe-europe:before{content:"\f7a2"}.fa-gofore:before{content:"\f3a7"}.fa-golf-ball:before{content:"\f450"}.fa-goodreads:before{content:"\f3a8"}.fa-goodreads-g:before{content:"\f3a9"}.fa-google:before{content:"\f1a0"}.fa-google-drive:before{content:"\f3aa"}.fa-google-pay:before{content:"\e079"}.fa-google-play:before{content:"\f3ab"}.fa-google-plus:before{content:"\f2b3"}.fa-google-plus-g:before{content:"\f0d5"}.fa-google-plus-square:before{content:"\f0d4"}.fa-google-wallet:before{content:"\f1ee"}.fa-gopuram:before{content:"\f664"}.fa-graduation-cap:before{content:"\f19d"}.fa-gratipay:before{content:"\f184"}.fa-grav:before{content:"\f2d6"}.fa-greater-than:before{content:"\f531"}.fa-greater-than-equal:before{content:"\f532"}.fa-grimace:before{content:"\f57f"}.fa-grin:before{content:"\f580"}.fa-grin-alt:before{content:"\f581"}.fa-grin-beam:before{content:"\f582"}.fa-grin-beam-sweat:before{content:"\f583"}.fa-grin-hearts:before{content:"\f584"}.fa-grin-squint:before{content:"\f585"}.fa-grin-squint-tears:before{content:"\f586"}.fa-grin-stars:before{content:"\f587"}.fa-grin-tears:before{content:"\f588"}.fa-grin-tongue:before{content:"\f589"}.fa-grin-tongue-squint:before{content:"\f58a"}.fa-grin-tongue-wink:before{content:"\f58b"}.fa-grin-wink:before{content:"\f58c"}.fa-grip-horizontal:before{content:"\f58d"}.fa-grip-lines:before{content:"\f7a4"}.fa-grip-lines-vertical:before{content:"\f7a5"}.fa-grip-vertical:before{content:"\f58e"}.fa-gripfire:before{content:"\f3ac"}.fa-grunt:before{content:"\f3ad"}.fa-guilded:before{content:"\e07e"}.fa-guitar:before{content:"\f7a6"}.fa-gulp:before{content:"\f3ae"}.fa-h-square:before{content:"\f0fd"}.fa-hacker-news:before{content:"\f1d4"}.fa-hacker-news-square:before{content:"\f3af"}.fa-hackerrank:before{content:"\f5f7"}.fa-hamburger:before{content:"\f805"}.fa-hammer:before{content:"\f6e3"}.fa-hamsa:before{content:"\f665"}.fa-hand-holding:before{content:"\f4bd"}.fa-hand-holding-heart:before{content:"\f4be"}.fa-hand-holding-medical:before{content:"\e05c"}.fa-hand-holding-usd:before{content:"\f4c0"}.fa-hand-holding-water:before{content:"\f4c1"}.fa-hand-lizard:before{content:"\f258"}.fa-hand-middle-finger:before{content:"\f806"}.fa-hand-paper:before{content:"\f256"}.fa-hand-peace:before{content:"\f25b"}.fa-hand-point-down:before{content:"\f0a7"}.fa-hand-point-left:before{content:"\f0a5"}.fa-hand-point-right:before{content:"\f0a4"}.fa-hand-point-up:before{content:"\f0a6"}.fa-hand-pointer:before{content:"\f25a"}.fa-hand-rock:before{content:"\f255"}.fa-hand-scissors:before{content:"\f257"}.fa-hand-sparkles:before{content:"\e05d"}.fa-hand-spock:before{content:"\f259"}.fa-hands:before{content:"\f4c2"}.fa-hands-helping:before{content:"\f4c4"}.fa-hands-wash:before{content:"\e05e"}.fa-handshake:before{content:"\f2b5"}.fa-handshake-alt-slash:before{content:"\e05f"}.fa-handshake-slash:before{content:"\e060"}.fa-hanukiah:before{content:"\f6e6"}.fa-hard-hat:before{content:"\f807"}.fa-hashtag:before{content:"\f292"}.fa-hat-cowboy:before{content:"\f8c0"}.fa-hat-cowboy-side:before{content:"\f8c1"}.fa-hat-wizard:before{content:"\f6e8"}.fa-hdd:before{content:"\f0a0"}.fa-head-side-cough:before{c
ontent:"\e061"}.fa-head-side-cough-slash:before{content:"\e062"}.fa-head-side-mask:before{content:"\e063"}.fa-head-side-virus:before{content:"\e064"}.fa-heading:before{content:"\f1dc"}.fa-headphones:before{content:"\f025"}.fa-headphones-alt:before{content:"\f58f"}.fa-headset:before{content:"\f590"}.fa-heart:before{content:"\f004"}.fa-heart-broken:before{content:"\f7a9"}.fa-heartbeat:before{content:"\f21e"}.fa-helicopter:before{content:"\f533"}.fa-highlighter:before{content:"\f591"}.fa-hiking:before{content:"\f6ec"}.fa-hippo:before{content:"\f6ed"}.fa-hips:before{content:"\f452"}.fa-hire-a-helper:before{content:"\f3b0"}.fa-history:before{content:"\f1da"}.fa-hive:before{content:"\e07f"}.fa-hockey-puck:before{content:"\f453"}.fa-holly-berry:before{content:"\f7aa"}.fa-home:before{content:"\f015"}.fa-hooli:before{content:"\f427"}.fa-hornbill:before{content:"\f592"}.fa-horse:before{content:"\f6f0"}.fa-horse-head:before{content:"\f7ab"}.fa-hospital:before{content:"\f0f8"}.fa-hospital-alt:before{content:"\f47d"}.fa-hospital-symbol:before{content:"\f47e"}.fa-hospital-user:before{content:"\f80d"}.fa-hot-tub:before{content:"\f593"}.fa-hotdog:before{content:"\f80f"}.fa-hotel:before{content:"\f594"}.fa-hotjar:before{content:"\f3b1"}.fa-hourglass:before{content:"\f254"}.fa-hourglass-end:before{content:"\f253"}.fa-hourglass-half:before{content:"\f252"}.fa-hourglass-start:before{content:"\f251"}.fa-house-damage:before{content:"\f6f1"}.fa-house-user:before{content:"\e065"}.fa-houzz:before{content:"\f27c"}.fa-hryvnia:before{content:"\f6f2"}.fa-html5:before{content:"\f13b"}.fa-hubspot:before{content:"\f3b2"}.fa-i-cursor:before{content:"\f246"}.fa-ice-cream:before{content:"\f810"}.fa-icicles:before{content:"\f7ad"}.fa-icons:before{content:"\f86d"}.fa-id-badge:before{content:"\f2c1"}.fa-id-card:before{content:"\f2c2"}.fa-id-card-alt:before{content:"\f47f"}.fa-ideal:before{content:"\e013"}.fa-igloo:before{content:"\f7ae"}.fa-image:before{content:"\f03e"}.fa-images:before{content:"\f302"}.fa-imdb:before{content:"\f2d8"}.fa-inbox:before{content:"\f01c"}.fa-indent:before{content:"\f03c"}.fa-industry:before{content:"\f275"}.fa-infinity:before{content:"\f534"}.fa-info:before{content:"\f129"}.fa-info-circle:before{content:"\f05a"}.fa-innosoft:before{content:"\e080"}.fa-instagram:before{content:"\f16d"}.fa-instagram-square:before{content:"\e055"}.fa-instalod:before{content:"\e081"}.fa-intercom:before{content:"\f7af"}.fa-internet-explorer:before{content:"\f26b"}.fa-invision:before{content:"\f7b0"}.fa-ioxhost:before{content:"\f208"}.fa-italic:before{content:"\f033"}.fa-itch-io:before{content:"\f83a"}.fa-itunes:before{content:"\f3b4"}.fa-itunes-note:before{content:"\f3b5"}.fa-java:before{content:"\f4e4"}.fa-jedi:before{content:"\f669"}.fa-jedi-order:before{content:"\f50e"}.fa-jenkins:before{content:"\f3b6"}.fa-jira:before{content:"\f7b1"}.fa-joget:before{content:"\f3b7"}.fa-joint:before{content:"\f595"}.fa-joomla:before{content:"\f1aa"}.fa-journal-whills:before{content:"\f66a"}.fa-js:before{content:"\f3b8"}.fa-js-square:before{content:"\f3b9"}.fa-jsfiddle:before{content:"\f1cc"}.fa-kaaba:before{content:"\f66b"}.fa-kaggle:before{content:"\f5fa"}.fa-key:before{content:"\f084"}.fa-keybase:before{content:"\f4f5"}.fa-keyboard:before{content:"\f11c"}.fa-keycdn:before{content:"\f3ba"}.fa-khanda:before{content:"\f66d"}.fa-kickstarter:before{content:"\f3bb"}.fa-kickstarter-k:before{content:"\f3bc"}.fa-kiss:before{content:"\f596"}.fa-kiss-beam:before{content:"\f597"}.fa-kiss-wink-heart:before{content:"\f598"}.fa-kiwi-bird:before{co
ntent:"\f535"}.fa-korvue:before{content:"\f42f"}.fa-landmark:before{content:"\f66f"}.fa-language:before{content:"\f1ab"}.fa-laptop:before{content:"\f109"}.fa-laptop-code:before{content:"\f5fc"}.fa-laptop-house:before{content:"\e066"}.fa-laptop-medical:before{content:"\f812"}.fa-laravel:before{content:"\f3bd"}.fa-lastfm:before{content:"\f202"}.fa-lastfm-square:before{content:"\f203"}.fa-laugh:before{content:"\f599"}.fa-laugh-beam:before{content:"\f59a"}.fa-laugh-squint:before{content:"\f59b"}.fa-laugh-wink:before{content:"\f59c"}.fa-layer-group:before{content:"\f5fd"}.fa-leaf:before{content:"\f06c"}.fa-leanpub:before{content:"\f212"}.fa-lemon:before{content:"\f094"}.fa-less:before{content:"\f41d"}.fa-less-than:before{content:"\f536"}.fa-less-than-equal:before{content:"\f537"}.fa-level-down-alt:before{content:"\f3be"}.fa-level-up-alt:before{content:"\f3bf"}.fa-life-ring:before{content:"\f1cd"}.fa-lightbulb:before{content:"\f0eb"}.fa-line:before{content:"\f3c0"}.fa-link:before{content:"\f0c1"}.fa-linkedin:before{content:"\f08c"}.fa-linkedin-in:before{content:"\f0e1"}.fa-linode:before{content:"\f2b8"}.fa-linux:before{content:"\f17c"}.fa-lira-sign:before{content:"\f195"}.fa-list:before{content:"\f03a"}.fa-list-alt:before{content:"\f022"}.fa-list-ol:before{content:"\f0cb"}.fa-list-ul:before{content:"\f0ca"}.fa-location-arrow:before{content:"\f124"}.fa-lock:before{content:"\f023"}.fa-lock-open:before{content:"\f3c1"}.fa-long-arrow-alt-down:before{content:"\f309"}.fa-long-arrow-alt-left:before{content:"\f30a"}.fa-long-arrow-alt-right:before{content:"\f30b"}.fa-long-arrow-alt-up:before{content:"\f30c"}.fa-low-vision:before{content:"\f2a8"}.fa-luggage-cart:before{content:"\f59d"}.fa-lungs:before{content:"\f604"}.fa-lungs-virus:before{content:"\e067"}.fa-lyft:before{content:"\f3c3"}.fa-magento:before{content:"\f3c4"}.fa-magic:before{content:"\f0d0"}.fa-magnet:before{content:"\f076"}.fa-mail-bulk:before{content:"\f674"}.fa-mailchimp:before{content:"\f59e"}.fa-male:before{content:"\f183"}.fa-mandalorian:before{content:"\f50f"}.fa-map:before{content:"\f279"}.fa-map-marked:before{content:"\f59f"}.fa-map-marked-alt:before{content:"\f5a0"}.fa-map-marker:before{content:"\f041"}.fa-map-marker-alt:before{content:"\f3c5"}.fa-map-pin:before{content:"\f276"}.fa-map-signs:before{content:"\f277"}.fa-markdown:before{content:"\f60f"}.fa-marker:before{content:"\f5a1"}.fa-mars:before{content:"\f222"}.fa-mars-double:before{content:"\f227"}.fa-mars-stroke:before{content:"\f229"}.fa-mars-stroke-h:before{content:"\f22b"}.fa-mars-stroke-v:before{content:"\f22a"}.fa-mask:before{content:"\f6fa"}.fa-mastodon:before{content:"\f4f6"}.fa-maxcdn:before{content:"\f136"}.fa-mdb:before{content:"\f8ca"}.fa-medal:before{content:"\f5a2"}.fa-medapps:before{content:"\f3c6"}.fa-medium:before{content:"\f23a"}.fa-medium-m:before{content:"\f3c7"}.fa-medkit:before{content:"\f0fa"}.fa-medrt:before{content:"\f3c8"}.fa-meetup:before{content:"\f2e0"}.fa-megaport:before{content:"\f5a3"}.fa-meh:before{content:"\f11a"}.fa-meh-blank:before{content:"\f5a4"}.fa-meh-rolling-eyes:before{content:"\f5a5"}.fa-memory:before{content:"\f538"}.fa-mendeley:before{content:"\f7b3"}.fa-menorah:before{content:"\f676"}.fa-mercury:before{content:"\f223"}.fa-meteor:before{content:"\f753"}.fa-microblog:before{content:"\e01a"}.fa-microchip:before{content:"\f2db"}.fa-microphone:before{content:"\f130"}.fa-microphone-alt:before{content:"\f3c9"}.fa-microphone-alt-slash:before{content:"\f539"}.fa-microphone-slash:before{content:"\f131"}.fa-microscope:before{content:"\f610"}.fa
-microsoft:before{content:"\f3ca"}.fa-minus:before{content:"\f068"}.fa-minus-circle:before{content:"\f056"}.fa-minus-square:before{content:"\f146"}.fa-mitten:before{content:"\f7b5"}.fa-mix:before{content:"\f3cb"}.fa-mixcloud:before{content:"\f289"}.fa-mixer:before{content:"\e056"}.fa-mizuni:before{content:"\f3cc"}.fa-mobile:before{content:"\f10b"}.fa-mobile-alt:before{content:"\f3cd"}.fa-modx:before{content:"\f285"}.fa-monero:before{content:"\f3d0"}.fa-money-bill:before{content:"\f0d6"}.fa-money-bill-alt:before{content:"\f3d1"}.fa-money-bill-wave:before{content:"\f53a"}.fa-money-bill-wave-alt:before{content:"\f53b"}.fa-money-check:before{content:"\f53c"}.fa-money-check-alt:before{content:"\f53d"}.fa-monument:before{content:"\f5a6"}.fa-moon:before{content:"\f186"}.fa-mortar-pestle:before{content:"\f5a7"}.fa-mosque:before{content:"\f678"}.fa-motorcycle:before{content:"\f21c"}.fa-mountain:before{content:"\f6fc"}.fa-mouse:before{content:"\f8cc"}.fa-mouse-pointer:before{content:"\f245"}.fa-mug-hot:before{content:"\f7b6"}.fa-music:before{content:"\f001"}.fa-napster:before{content:"\f3d2"}.fa-neos:before{content:"\f612"}.fa-network-wired:before{content:"\f6ff"}.fa-neuter:before{content:"\f22c"}.fa-newspaper:before{content:"\f1ea"}.fa-nimblr:before{content:"\f5a8"}.fa-node:before{content:"\f419"}.fa-node-js:before{content:"\f3d3"}.fa-not-equal:before{content:"\f53e"}.fa-notes-medical:before{content:"\f481"}.fa-npm:before{content:"\f3d4"}.fa-ns8:before{content:"\f3d5"}.fa-nutritionix:before{content:"\f3d6"}.fa-object-group:before{content:"\f247"}.fa-object-ungroup:before{content:"\f248"}.fa-octopus-deploy:before{content:"\e082"}.fa-odnoklassniki:before{content:"\f263"}.fa-odnoklassniki-square:before{content:"\f264"}.fa-oil-can:before{content:"\f613"}.fa-old-republic:before{content:"\f510"}.fa-om:before{content:"\f679"}.fa-opencart:before{content:"\f23d"}.fa-openid:before{content:"\f19b"}.fa-opera:before{content:"\f26a"}.fa-optin-monster:before{content:"\f23c"}.fa-orcid:before{content:"\f8d2"}.fa-osi:before{content:"\f41a"}.fa-otter:before{content:"\f700"}.fa-outdent:before{content:"\f03b"}.fa-page4:before{content:"\f3d7"}.fa-pagelines:before{content:"\f18c"}.fa-pager:before{content:"\f815"}.fa-paint-brush:before{content:"\f1fc"}.fa-paint-roller:before{content:"\f5aa"}.fa-palette:before{content:"\f53f"}.fa-palfed:before{content:"\f3d8"}.fa-pallet:before{content:"\f482"}.fa-paper-plane:before{content:"\f1d8"}.fa-paperclip:before{content:"\f0c6"}.fa-parachute-box:before{content:"\f4cd"}.fa-paragraph:before{content:"\f1dd"}.fa-parking:before{content:"\f540"}.fa-passport:before{content:"\f5ab"}.fa-pastafarianism:before{content:"\f67b"}.fa-paste:before{content:"\f0ea"}.fa-patreon:before{content:"\f3d9"}.fa-pause:before{content:"\f04c"}.fa-pause-circle:before{content:"\f28b"}.fa-paw:before{content:"\f1b0"}.fa-paypal:before{content:"\f1ed"}.fa-peace:before{content:"\f67c"}.fa-pen:before{content:"\f304"}.fa-pen-alt:before{content:"\f305"}.fa-pen-fancy:before{content:"\f5ac"}.fa-pen-nib:before{content:"\f5ad"}.fa-pen-square:before{content:"\f14b"}.fa-pencil-alt:before{content:"\f303"}.fa-pencil-ruler:before{content:"\f5ae"}.fa-penny-arcade:before{content:"\f704"}.fa-people-arrows:before{content:"\e068"}.fa-people-carry:before{content:"\f4ce"}.fa-pepper-hot:before{content:"\f816"}.fa-perbyte:before{content:"\e083"}.fa-percent:before{content:"\f295"}.fa-percentage:before{content:"\f541"}.fa-periscope:before{content:"\f3da"}.fa-person-booth:before{content:"\f756"}.fa-phabricator:before{content:"\f3db"}.fa-phoeni
x-framework:before{content:"\f3dc"}.fa-phoenix-squadron:before{content:"\f511"}.fa-phone:before{content:"\f095"}.fa-phone-alt:before{content:"\f879"}.fa-phone-slash:before{content:"\f3dd"}.fa-phone-square:before{content:"\f098"}.fa-phone-square-alt:before{content:"\f87b"}.fa-phone-volume:before{content:"\f2a0"}.fa-photo-video:before{content:"\f87c"}.fa-php:before{content:"\f457"}.fa-pied-piper:before{content:"\f2ae"}.fa-pied-piper-alt:before{content:"\f1a8"}.fa-pied-piper-hat:before{content:"\f4e5"}.fa-pied-piper-pp:before{content:"\f1a7"}.fa-pied-piper-square:before{content:"\e01e"}.fa-piggy-bank:before{content:"\f4d3"}.fa-pills:before{content:"\f484"}.fa-pinterest:before{content:"\f0d2"}.fa-pinterest-p:before{content:"\f231"}.fa-pinterest-square:before{content:"\f0d3"}.fa-pizza-slice:before{content:"\f818"}.fa-place-of-worship:before{content:"\f67f"}.fa-plane:before{content:"\f072"}.fa-plane-arrival:before{content:"\f5af"}.fa-plane-departure:before{content:"\f5b0"}.fa-plane-slash:before{content:"\e069"}.fa-play:before{content:"\f04b"}.fa-play-circle:before{content:"\f144"}.fa-playstation:before{content:"\f3df"}.fa-plug:before{content:"\f1e6"}.fa-plus:before{content:"\f067"}.fa-plus-circle:before{content:"\f055"}.fa-plus-square:before{content:"\f0fe"}.fa-podcast:before{content:"\f2ce"}.fa-poll:before{content:"\f681"}.fa-poll-h:before{content:"\f682"}.fa-poo:before{content:"\f2fe"}.fa-poo-storm:before{content:"\f75a"}.fa-poop:before{content:"\f619"}.fa-portrait:before{content:"\f3e0"}.fa-pound-sign:before{content:"\f154"}.fa-power-off:before{content:"\f011"}.fa-pray:before{content:"\f683"}.fa-praying-hands:before{content:"\f684"}.fa-prescription:before{content:"\f5b1"}.fa-prescription-bottle:before{content:"\f485"}.fa-prescription-bottle-alt:before{content:"\f486"}.fa-print:before{content:"\f02f"}.fa-procedures:before{content:"\f487"}.fa-product-hunt:before{content:"\f288"}.fa-project-diagram:before{content:"\f542"}.fa-pump-medical:before{content:"\e06a"}.fa-pump-soap:before{content:"\e06b"}.fa-pushed:before{content:"\f3e1"}.fa-puzzle-piece:before{content:"\f12e"}.fa-python:before{content:"\f3e2"}.fa-qq:before{content:"\f1d6"}.fa-qrcode:before{content:"\f029"}.fa-question:before{content:"\f128"}.fa-question-circle:before{content:"\f059"}.fa-quidditch:before{content:"\f458"}.fa-quinscape:before{content:"\f459"}.fa-quora:before{content:"\f2c4"}.fa-quote-left:before{content:"\f10d"}.fa-quote-right:before{content:"\f10e"}.fa-quran:before{content:"\f687"}.fa-r-project:before{content:"\f4f7"}.fa-radiation:before{content:"\f7b9"}.fa-radiation-alt:before{content:"\f7ba"}.fa-rainbow:before{content:"\f75b"}.fa-random:before{content:"\f074"}.fa-raspberry-pi:before{content:"\f7bb"}.fa-ravelry:before{content:"\f2d9"}.fa-react:before{content:"\f41b"}.fa-reacteurope:before{content:"\f75d"}.fa-readme:before{content:"\f4d5"}.fa-rebel:before{content:"\f1d0"}.fa-receipt:before{content:"\f543"}.fa-record-vinyl:before{content:"\f8d9"}.fa-recycle:before{content:"\f1b8"}.fa-red-river:before{content:"\f3e3"}.fa-reddit:before{content:"\f1a1"}.fa-reddit-alien:before{content:"\f281"}.fa-reddit-square:before{content:"\f1a2"}.fa-redhat:before{content:"\f7bc"}.fa-redo:before{content:"\f01e"}.fa-redo-alt:before{content:"\f2f9"}.fa-registered:before{content:"\f25d"}.fa-remove-format:before{content:"\f87d"}.fa-renren:before{content:"\f18b"}.fa-reply:before{content:"\f3e5"}.fa-reply-all:before{content:"\f122"}.fa-replyd:before{content:"\f3e6"}.fa-republican:before{content:"\f75e"}.fa-researchgate:before{content:"\f4f8"}.fa-
resolving:before{content:"\f3e7"}.fa-restroom:before{content:"\f7bd"}.fa-retweet:before{content:"\f079"}.fa-rev:before{content:"\f5b2"}.fa-ribbon:before{content:"\f4d6"}.fa-ring:before{content:"\f70b"}.fa-road:before{content:"\f018"}.fa-robot:before{content:"\f544"}.fa-rocket:before{content:"\f135"}.fa-rocketchat:before{content:"\f3e8"}.fa-rockrms:before{content:"\f3e9"}.fa-route:before{content:"\f4d7"}.fa-rss:before{content:"\f09e"}.fa-rss-square:before{content:"\f143"}.fa-ruble-sign:before{content:"\f158"}.fa-ruler:before{content:"\f545"}.fa-ruler-combined:before{content:"\f546"}.fa-ruler-horizontal:before{content:"\f547"}.fa-ruler-vertical:before{content:"\f548"}.fa-running:before{content:"\f70c"}.fa-rupee-sign:before{content:"\f156"}.fa-rust:before{content:"\e07a"}.fa-sad-cry:before{content:"\f5b3"}.fa-sad-tear:before{content:"\f5b4"}.fa-safari:before{content:"\f267"}.fa-salesforce:before{content:"\f83b"}.fa-sass:before{content:"\f41e"}.fa-satellite:before{content:"\f7bf"}.fa-satellite-dish:before{content:"\f7c0"}.fa-save:before{content:"\f0c7"}.fa-schlix:before{content:"\f3ea"}.fa-school:before{content:"\f549"}.fa-screwdriver:before{content:"\f54a"}.fa-scribd:before{content:"\f28a"}.fa-scroll:before{content:"\f70e"}.fa-sd-card:before{content:"\f7c2"}.fa-search:before{content:"\f002"}.fa-search-dollar:before{content:"\f688"}.fa-search-location:before{content:"\f689"}.fa-search-minus:before{content:"\f010"}.fa-search-plus:before{content:"\f00e"}.fa-searchengin:before{content:"\f3eb"}.fa-seedling:before{content:"\f4d8"}.fa-sellcast:before{content:"\f2da"}.fa-sellsy:before{content:"\f213"}.fa-server:before{content:"\f233"}.fa-servicestack:before{content:"\f3ec"}.fa-shapes:before{content:"\f61f"}.fa-share:before{content:"\f064"}.fa-share-alt:before{content:"\f1e0"}.fa-share-alt-square:before{content:"\f1e1"}.fa-share-square:before{content:"\f14d"}.fa-shekel-sign:before{content:"\f20b"}.fa-shield-alt:before{content:"\f3ed"}.fa-shield-virus:before{content:"\e06c"}.fa-ship:before{content:"\f21a"}.fa-shipping-fast:before{content:"\f48b"}.fa-shirtsinbulk:before{content:"\f214"}.fa-shoe-prints:before{content:"\f54b"}.fa-shopify:before{content:"\e057"}.fa-shopping-bag:before{content:"\f290"}.fa-shopping-basket:before{content:"\f291"}.fa-shopping-cart:before{content:"\f07a"}.fa-shopware:before{content:"\f5b5"}.fa-shower:before{content:"\f2cc"}.fa-shuttle-van:before{content:"\f5b6"}.fa-sign:before{content:"\f4d9"}.fa-sign-in-alt:before{content:"\f2f6"}.fa-sign-language:before{content:"\f2a7"}.fa-sign-out-alt:before{content:"\f2f5"}.fa-signal:before{content:"\f012"}.fa-signature:before{content:"\f5b7"}.fa-sim-card:before{content:"\f7c4"}.fa-simplybuilt:before{content:"\f215"}.fa-sink:before{content:"\e06d"}.fa-sistrix:before{content:"\f3ee"}.fa-sitemap:before{content:"\f0e8"}.fa-sith:before{content:"\f512"}.fa-skating:before{content:"\f7c5"}.fa-sketch:before{content:"\f7c6"}.fa-skiing:before{content:"\f7c9"}.fa-skiing-nordic:before{content:"\f7ca"}.fa-skull:before{content:"\f54c"}.fa-skull-crossbones:before{content:"\f714"}.fa-skyatlas:before{content:"\f216"}.fa-skype:before{content:"\f17e"}.fa-slack:before{content:"\f198"}.fa-slack-hash:before{content:"\f3ef"}.fa-slash:before{content:"\f715"}.fa-sleigh:before{content:"\f7cc"}.fa-sliders-h:before{content:"\f1de"}.fa-slideshare:before{content:"\f1e7"}.fa-smile:before{content:"\f118"}.fa-smile-beam:before{content:"\f5b8"}.fa-smile-wink:before{content:"\f4da"}.fa-smog:before{content:"\f75f"}.fa-smoking:before{content:"\f48d"}.fa-smoking-ban:before{conte
nt:"\f54d"}.fa-sms:before{content:"\f7cd"}.fa-snapchat:before{content:"\f2ab"}.fa-snapchat-ghost:before{content:"\f2ac"}.fa-snapchat-square:before{content:"\f2ad"}.fa-snowboarding:before{content:"\f7ce"}.fa-snowflake:before{content:"\f2dc"}.fa-snowman:before{content:"\f7d0"}.fa-snowplow:before{content:"\f7d2"}.fa-soap:before{content:"\e06e"}.fa-socks:before{content:"\f696"}.fa-solar-panel:before{content:"\f5ba"}.fa-sort:before{content:"\f0dc"}.fa-sort-alpha-down:before{content:"\f15d"}.fa-sort-alpha-down-alt:before{content:"\f881"}.fa-sort-alpha-up:before{content:"\f15e"}.fa-sort-alpha-up-alt:before{content:"\f882"}.fa-sort-amount-down:before{content:"\f160"}.fa-sort-amount-down-alt:before{content:"\f884"}.fa-sort-amount-up:before{content:"\f161"}.fa-sort-amount-up-alt:before{content:"\f885"}.fa-sort-down:before{content:"\f0dd"}.fa-sort-numeric-down:before{content:"\f162"}.fa-sort-numeric-down-alt:before{content:"\f886"}.fa-sort-numeric-up:before{content:"\f163"}.fa-sort-numeric-up-alt:before{content:"\f887"}.fa-sort-up:before{content:"\f0de"}.fa-soundcloud:before{content:"\f1be"}.fa-sourcetree:before{content:"\f7d3"}.fa-spa:before{content:"\f5bb"}.fa-space-shuttle:before{content:"\f197"}.fa-speakap:before{content:"\f3f3"}.fa-speaker-deck:before{content:"\f83c"}.fa-spell-check:before{content:"\f891"}.fa-spider:before{content:"\f717"}.fa-spinner:before{content:"\f110"}.fa-splotch:before{content:"\f5bc"}.fa-spotify:before{content:"\f1bc"}.fa-spray-can:before{content:"\f5bd"}.fa-square:before{content:"\f0c8"}.fa-square-full:before{content:"\f45c"}.fa-square-root-alt:before{content:"\f698"}.fa-squarespace:before{content:"\f5be"}.fa-stack-exchange:before{content:"\f18d"}.fa-stack-overflow:before{content:"\f16c"}.fa-stackpath:before{content:"\f842"}.fa-stamp:before{content:"\f5bf"}.fa-star:before{content:"\f005"}.fa-star-and-crescent:before{content:"\f699"}.fa-star-half:before{content:"\f089"}.fa-star-half-alt:before{content:"\f5c0"}.fa-star-of-david:before{content:"\f69a"}.fa-star-of-life:before{content:"\f621"}.fa-staylinked:before{content:"\f3f5"}.fa-steam:before{content:"\f1b6"}.fa-steam-square:before{content:"\f1b7"}.fa-steam-symbol:before{content:"\f3f6"}.fa-step-backward:before{content:"\f048"}.fa-step-forward:before{content:"\f051"}.fa-stethoscope:before{content:"\f0f1"}.fa-sticker-mule:before{content:"\f3f7"}.fa-sticky-note:before{content:"\f249"}.fa-stop:before{content:"\f04d"}.fa-stop-circle:before{content:"\f28d"}.fa-stopwatch:before{content:"\f2f2"}.fa-stopwatch-20:before{content:"\e06f"}.fa-store:before{content:"\f54e"}.fa-store-alt:before{content:"\f54f"}.fa-store-alt-slash:before{content:"\e070"}.fa-store-slash:before{content:"\e071"}.fa-strava:before{content:"\f428"}.fa-stream:before{content:"\f550"}.fa-street-view:before{content:"\f21d"}.fa-strikethrough:before{content:"\f0cc"}.fa-stripe:before{content:"\f429"}.fa-stripe-s:before{content:"\f42a"}.fa-stroopwafel:before{content:"\f551"}.fa-studiovinari:before{content:"\f3f8"}.fa-stumbleupon:before{content:"\f1a4"}.fa-stumbleupon-circle:before{content:"\f1a3"}.fa-subscript:before{content:"\f12c"}.fa-subway:before{content:"\f239"}.fa-suitcase:before{content:"\f0f2"}.fa-suitcase-rolling:before{content:"\f5c1"}.fa-sun:before{content:"\f185"}.fa-superpowers:before{content:"\f2dd"}.fa-superscript:before{content:"\f12b"}.fa-supple:before{content:"\f3f9"}.fa-surprise:before{content:"\f5c2"}.fa-suse:before{content:"\f7d6"}.fa-swatchbook:before{content:"\f5c3"}.fa-swift:before{content:"\f8e1"}.fa-swimmer:before{content:"\f5c4"}.fa-swimming-
pool:before{content:"\f5c5"}.fa-symfony:before{content:"\f83d"}.fa-synagogue:before{content:"\f69b"}.fa-sync:before{content:"\f021"}.fa-sync-alt:before{content:"\f2f1"}.fa-syringe:before{content:"\f48e"}.fa-table:before{content:"\f0ce"}.fa-table-tennis:before{content:"\f45d"}.fa-tablet:before{content:"\f10a"}.fa-tablet-alt:before{content:"\f3fa"}.fa-tablets:before{content:"\f490"}.fa-tachometer-alt:before{content:"\f3fd"}.fa-tag:before{content:"\f02b"}.fa-tags:before{content:"\f02c"}.fa-tape:before{content:"\f4db"}.fa-tasks:before{content:"\f0ae"}.fa-taxi:before{content:"\f1ba"}.fa-teamspeak:before{content:"\f4f9"}.fa-teeth:before{content:"\f62e"}.fa-teeth-open:before{content:"\f62f"}.fa-telegram:before{content:"\f2c6"}.fa-telegram-plane:before{content:"\f3fe"}.fa-temperature-high:before{content:"\f769"}.fa-temperature-low:before{content:"\f76b"}.fa-tencent-weibo:before{content:"\f1d5"}.fa-tenge:before{content:"\f7d7"}.fa-terminal:before{content:"\f120"}.fa-text-height:before{content:"\f034"}.fa-text-width:before{content:"\f035"}.fa-th:before{content:"\f00a"}.fa-th-large:before{content:"\f009"}.fa-th-list:before{content:"\f00b"}.fa-the-red-yeti:before{content:"\f69d"}.fa-theater-masks:before{content:"\f630"}.fa-themeco:before{content:"\f5c6"}.fa-themeisle:before{content:"\f2b2"}.fa-thermometer:before{content:"\f491"}.fa-thermometer-empty:before{content:"\f2cb"}.fa-thermometer-full:before{content:"\f2c7"}.fa-thermometer-half:before{content:"\f2c9"}.fa-thermometer-quarter:before{content:"\f2ca"}.fa-thermometer-three-quarters:before{content:"\f2c8"}.fa-think-peaks:before{content:"\f731"}.fa-thumbs-down:before{content:"\f165"}.fa-thumbs-up:before{content:"\f164"}.fa-thumbtack:before{content:"\f08d"}.fa-ticket-alt:before{content:"\f3ff"}.fa-tiktok:before{content:"\e07b"}.fa-times:before{content:"\f00d"}.fa-times-circle:before{content:"\f057"}.fa-tint:before{content:"\f043"}.fa-tint-slash:before{content:"\f5c7"}.fa-tired:before{content:"\f5c8"}.fa-toggle-off:before{content:"\f204"}.fa-toggle-on:before{content:"\f205"}.fa-toilet:before{content:"\f7d8"}.fa-toilet-paper:before{content:"\f71e"}.fa-toilet-paper-slash:before{content:"\e072"}.fa-toolbox:before{content:"\f552"}.fa-tools:before{content:"\f7d9"}.fa-tooth:before{content:"\f5c9"}.fa-torah:before{content:"\f6a0"}.fa-torii-gate:before{content:"\f6a1"}.fa-tractor:before{content:"\f722"}.fa-trade-federation:before{content:"\f513"}.fa-trademark:before{content:"\f25c"}.fa-traffic-light:before{content:"\f637"}.fa-trailer:before{content:"\e041"}.fa-train:before{content:"\f238"}.fa-tram:before{content:"\f7da"}.fa-transgender:before{content:"\f224"}.fa-transgender-alt:before{content:"\f225"}.fa-trash:before{content:"\f1f8"}.fa-trash-alt:before{content:"\f2ed"}.fa-trash-restore:before{content:"\f829"}.fa-trash-restore-alt:before{content:"\f82a"}.fa-tree:before{content:"\f1bb"}.fa-trello:before{content:"\f181"}.fa-tripadvisor:before{content:"\f262"}.fa-trophy:before{content:"\f091"}.fa-truck:before{content:"\f0d1"}.fa-truck-loading:before{content:"\f4de"}.fa-truck-monster:before{content:"\f63b"}.fa-truck-moving:before{content:"\f4df"}.fa-truck-pickup:before{content:"\f63c"}.fa-tshirt:before{content:"\f553"}.fa-tty:before{content:"\f1e4"}.fa-tumblr:before{content:"\f173"}.fa-tumblr-square:before{content:"\f174"}.fa-tv:before{content:"\f26c"}.fa-twitch:before{content:"\f1e8"}.fa-twitter:before{content:"\f099"}.fa-twitter-square:before{content:"\f081"}.fa-typo3:before{content:"\f42b"}.fa-uber:before{content:"\f402"}.fa-ubuntu:before{content:"\f7df"}.fa-uik
it:before{content:"\f403"}.fa-umbraco:before{content:"\f8e8"}.fa-umbrella:before{content:"\f0e9"}.fa-umbrella-beach:before{content:"\f5ca"}.fa-uncharted:before{content:"\e084"}.fa-underline:before{content:"\f0cd"}.fa-undo:before{content:"\f0e2"}.fa-undo-alt:before{content:"\f2ea"}.fa-uniregistry:before{content:"\f404"}.fa-unity:before{content:"\e049"}.fa-universal-access:before{content:"\f29a"}.fa-university:before{content:"\f19c"}.fa-unlink:before{content:"\f127"}.fa-unlock:before{content:"\f09c"}.fa-unlock-alt:before{content:"\f13e"}.fa-unsplash:before{content:"\e07c"}.fa-untappd:before{content:"\f405"}.fa-upload:before{content:"\f093"}.fa-ups:before{content:"\f7e0"}.fa-usb:before{content:"\f287"}.fa-user:before{content:"\f007"}.fa-user-alt:before{content:"\f406"}.fa-user-alt-slash:before{content:"\f4fa"}.fa-user-astronaut:before{content:"\f4fb"}.fa-user-check:before{content:"\f4fc"}.fa-user-circle:before{content:"\f2bd"}.fa-user-clock:before{content:"\f4fd"}.fa-user-cog:before{content:"\f4fe"}.fa-user-edit:before{content:"\f4ff"}.fa-user-friends:before{content:"\f500"}.fa-user-graduate:before{content:"\f501"}.fa-user-injured:before{content:"\f728"}.fa-user-lock:before{content:"\f502"}.fa-user-md:before{content:"\f0f0"}.fa-user-minus:before{content:"\f503"}.fa-user-ninja:before{content:"\f504"}.fa-user-nurse:before{content:"\f82f"}.fa-user-plus:before{content:"\f234"}.fa-user-secret:before{content:"\f21b"}.fa-user-shield:before{content:"\f505"}.fa-user-slash:before{content:"\f506"}.fa-user-tag:before{content:"\f507"}.fa-user-tie:before{content:"\f508"}.fa-user-times:before{content:"\f235"}.fa-users:before{content:"\f0c0"}.fa-users-cog:before{content:"\f509"}.fa-users-slash:before{content:"\e073"}.fa-usps:before{content:"\f7e1"}.fa-ussunnah:before{content:"\f407"}.fa-utensil-spoon:before{content:"\f2e5"}.fa-utensils:before{content:"\f2e7"}.fa-vaadin:before{content:"\f408"}.fa-vector-square:before{content:"\f5cb"}.fa-venus:before{content:"\f221"}.fa-venus-double:before{content:"\f226"}.fa-venus-mars:before{content:"\f228"}.fa-vest:before{content:"\e085"}.fa-vest-patches:before{content:"\e086"}.fa-viacoin:before{content:"\f237"}.fa-viadeo:before{content:"\f2a9"}.fa-viadeo-square:before{content:"\f2aa"}.fa-vial:before{content:"\f492"}.fa-vials:before{content:"\f493"}.fa-viber:before{content:"\f409"}.fa-video:before{content:"\f03d"}.fa-video-slash:before{content:"\f4e2"}.fa-vihara:before{content:"\f6a7"}.fa-vimeo:before{content:"\f40a"}.fa-vimeo-square:before{content:"\f194"}.fa-vimeo-v:before{content:"\f27d"}.fa-vine:before{content:"\f1ca"}.fa-virus:before{content:"\e074"}.fa-virus-slash:before{content:"\e075"}.fa-viruses:before{content:"\e076"}.fa-vk:before{content:"\f189"}.fa-vnv:before{content:"\f40b"}.fa-voicemail:before{content:"\f897"}.fa-volleyball-ball:before{content:"\f45f"}.fa-volume-down:before{content:"\f027"}.fa-volume-mute:before{content:"\f6a9"}.fa-volume-off:before{content:"\f026"}.fa-volume-up:before{content:"\f028"}.fa-vote-yea:before{content:"\f772"}.fa-vr-cardboard:before{content:"\f729"}.fa-vuejs:before{content:"\f41f"}.fa-walking:before{content:"\f554"}.fa-wallet:before{content:"\f555"}.fa-warehouse:before{content:"\f494"}.fa-watchman-monitoring:before{content:"\e087"}.fa-water:before{content:"\f773"}.fa-wave-square:before{content:"\f83e"}.fa-waze:before{content:"\f83f"}.fa-weebly:before{content:"\f5cc"}.fa-weibo:before{content:"\f18a"}.fa-weight:before{content:"\f496"}.fa-weight-hanging:before{content:"\f5cd"}.fa-weixin:before{content:"\f1d7"}.fa-whatsapp:before{content
:"\f232"}.fa-whatsapp-square:before{content:"\f40c"}.fa-wheelchair:before{content:"\f193"}.fa-whmcs:before{content:"\f40d"}.fa-wifi:before{content:"\f1eb"}.fa-wikipedia-w:before{content:"\f266"}.fa-wind:before{content:"\f72e"}.fa-window-close:before{content:"\f410"}.fa-window-maximize:before{content:"\f2d0"}.fa-window-minimize:before{content:"\f2d1"}.fa-window-restore:before{content:"\f2d2"}.fa-windows:before{content:"\f17a"}.fa-wine-bottle:before{content:"\f72f"}.fa-wine-glass:before{content:"\f4e3"}.fa-wine-glass-alt:before{content:"\f5ce"}.fa-wix:before{content:"\f5cf"}.fa-wizards-of-the-coast:before{content:"\f730"}.fa-wodu:before{content:"\e088"}.fa-wolf-pack-battalion:before{content:"\f514"}.fa-won-sign:before{content:"\f159"}.fa-wordpress:before{content:"\f19a"}.fa-wordpress-simple:before{content:"\f411"}.fa-wpbeginner:before{content:"\f297"}.fa-wpexplorer:before{content:"\f2de"}.fa-wpforms:before{content:"\f298"}.fa-wpressr:before{content:"\f3e4"}.fa-wrench:before{content:"\f0ad"}.fa-x-ray:before{content:"\f497"}.fa-xbox:before{content:"\f412"}.fa-xing:before{content:"\f168"}.fa-xing-square:before{content:"\f169"}.fa-y-combinator:before{content:"\f23b"}.fa-yahoo:before{content:"\f19e"}.fa-yammer:before{content:"\f840"}.fa-yandex:before{content:"\f413"}.fa-yandex-international:before{content:"\f414"}.fa-yarn:before{content:"\f7e3"}.fa-yelp:before{content:"\f1e9"}.fa-yen-sign:before{content:"\f157"}.fa-yin-yang:before{content:"\f6ad"}.fa-yoast:before{content:"\f2b1"}.fa-youtube:before{content:"\f167"}.fa-youtube-square:before{content:"\f431"}.fa-zhihu:before{content:"\f63f"}.sr-only{border:0;clip:rect(0,0,0,0);height:1px;margin:-1px;overflow:hidden;padding:0;position:absolute;width:1px}.sr-only-focusable:active,.sr-only-focusable:focus{clip:auto;height:auto;margin:0;overflow:visible;position:static;width:auto}@font-face{font-family:"Font Awesome 5 Brands";font-style:normal;font-weight:400;font-display:block;src:url(../webfonts/fa-brands-400.eot);src:url(../webfonts/fa-brands-400.eot?#iefix) format("embedded-opentype"),url(../webfonts/fa-brands-400.woff2) format("woff2"),url(../webfonts/fa-brands-400.woff) format("woff"),url(../webfonts/fa-brands-400.ttf) format("truetype"),url(../webfonts/fa-brands-400.svg#fontawesome) format("svg")}.fab{font-family:"Font Awesome 5 Brands"}@font-face{font-family:"Font Awesome 5 Free";font-style:normal;font-weight:400;font-display:block;src:url(../webfonts/fa-regular-400.eot);src:url(../webfonts/fa-regular-400.eot?#iefix) format("embedded-opentype"),url(../webfonts/fa-regular-400.woff2) format("woff2"),url(../webfonts/fa-regular-400.woff) format("woff"),url(../webfonts/fa-regular-400.ttf) format("truetype"),url(../webfonts/fa-regular-400.svg#fontawesome) format("svg")}.fab,.far{font-weight:400}@font-face{font-family:"Font Awesome 5 Free";font-style:normal;font-weight:900;font-display:block;src:url(../webfonts/fa-solid-900.eot);src:url(../webfonts/fa-solid-900.eot?#iefix) format("embedded-opentype"),url(../webfonts/fa-solid-900.woff2) format("woff2"),url(../webfonts/fa-solid-900.woff) format("woff"),url(../webfonts/fa-solid-900.ttf) format("truetype"),url(../webfonts/fa-solid-900.svg#fontawesome) format("svg")}.fa,.far,.fas{font-family:"Font Awesome 5 Free"}.fa,.fas{font-weight:900}
docs/static/css/index.css ADDED
@@ -0,0 +1,170 @@
+ body {
+ font-family: 'Noto Sans', sans-serif;
+ }
+
+
+ .footer .icon-link {
+ font-size: 25px;
+ color: #000;
+ }
+
+ .link-block a {
+ margin-top: 5px;
+ margin-bottom: 5px;
+ }
+
+ .dnerf {
+ font-variant: small-caps;
+ }
+
+
+ .teaser .hero-body {
+ padding-top: 0;
+ padding-bottom: 3rem;
+ }
+
+ .teaser {
+ font-family: 'Google Sans', sans-serif;
+ }
+
+ .teaser-subtitle {
+ font-size: 1.1rem !important;
+ line-height: 1.5;
+ margin-top: 1rem;
+ }
+
+
+ .publication-title {
+ }
+
+ .publication-banner {
+ max-height: 100%;
+
+ }
+
+ .publication-banner video {
+ position: relative;
+ left: auto;
+ top: auto;
+ transform: none;
+ object-fit: fill;
+ }
+
+ .publication-header .hero-body {
+ }
+
+ .publication-title {
+ font-family: 'Google Sans', sans-serif;
+ }
+
+ .publication-authors {
+ font-family: 'Google Sans', sans-serif;
+ }
+
+ .publication-venue {
+ color: #555;
+ width: fit-content;
+ font-weight: bold;
+ }
+
+ .publication-awards {
+ color: #ff3860;
+ width: fit-content;
+ font-weight: bolder;
+ }
+
+ .publication-authors {
+ }
+
+ .publication-authors a {
+ color: hsl(204, 86%, 53%) !important;
+ }
+
+ .publication-authors a:hover {
+ text-decoration: underline;
+ }
+
+ .author-block {
+ display: inline-block;
+ }
+
+ .publication-banner img {
+ }
+
+ .publication-authors {
+ /*color: #4286f4;*/
+ }
+
+ .publication-video {
+ position: relative;
+ width: 100%;
+ height: 0;
+ padding-bottom: 56.25%;
+
+ overflow: hidden;
+ border-radius: 10px !important;
+ }
+
+ .publication-video iframe {
+ position: absolute;
+ top: 0;
+ left: 0;
+ width: 100%;
+ height: 100%;
+ }
+
+ .publication-body img {
+ }
+
+ .results-carousel {
+ overflow: hidden;
+ }
+
+ .results-carousel .item {
+ margin: 5px;
+ overflow: hidden;
+ border: 1px solid #bbb;
+ border-radius: 10px;
+ padding: 0;
+ font-size: 0;
+ }
+
+ .results-carousel video {
+ margin: 0;
+ }
+
+
+ .interpolation-panel {
+ background: #f5f5f5;
+ border-radius: 10px;
+ }
+
+ .interpolation-panel .interpolation-image {
+ width: 100%;
+ border-radius: 5px;
+ }
+
+ .interpolation-video-column {
+ }
+
+ .interpolation-panel .slider {
+ margin: 0 !important;
+ }
+
+ .interpolation-panel .slider {
+ margin: 0 !important;
+ }
+
+ #interpolation-image-wrapper {
+ width: 100%;
+ }
+ #interpolation-image-wrapper img {
+ border-radius: 5px;
+ }
+
+ .image-caption {
+ margin-top: 0.2rem;
+ font-size: 1.0rem;
+ color: #4a4a4a;
+ text-align: center;
+ }
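
A quick note on the .publication-video rules in the stylesheet above: height: 0 plus padding-bottom: 56.25% is the classic fixed-aspect-ratio embed trick. Percentage padding resolves against the element's own width, and 9 / 16 = 0.5625, so the wrapper always renders at 16:9 while the absolutely positioned iframe stretches to fill it. On current browsers the same effect can be achieved with the aspect-ratio property; a minimal sketch (not part of the uploaded file, shown only for comparison):

.publication-video {
  position: relative;
  width: 100%;
  aspect-ratio: 16 / 9; /* replaces the height: 0 + padding-bottom: 56.25% pair */
  overflow: hidden;
  border-radius: 10px;
}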
docs/static/images/application.jpg ADDED

Git LFS Details

  • SHA256: a316d0216dc5d75d3560f165852de33381e79439027019e3e7d1d8709be85355
  • Pointer size: 132 Bytes
  • Size of remote file: 1.7 MB
docs/static/images/cross_custom.jpg ADDED

Git LFS Details

  • SHA256: 633a9df42786b62b64f4b107fdb940160a6446c975b1878aa0273d4e751673aa
  • Pointer size: 132 Bytes
  • Size of remote file: 2.64 MB
docs/static/images/framework.jpg ADDED

Git LFS Details

  • SHA256: 9d0ba5df5c590b0ac45111c625b38cc6d86b0d9e98a5ed28baa0633e00bc73fe
  • Pointer size: 131 Bytes
  • Size of remote file: 802 kB
docs/static/images/gradio_preview.png ADDED

Git LFS Details

  • SHA256: 2f44b7a42bce2baaeba05234efd2ff3ff4f1a7dc04cfe452ae6d635ba8e355a9
  • Pointer size: 132 Bytes
  • Size of remote file: 3.08 MB
docs/static/images/icon.jpg ADDED

Git LFS Details

  • SHA256: c59125b13a016243913f233aef268480239a1b7fc6eb048cbbca541369c4d207
  • Pointer size: 131 Bytes
  • Size of remote file: 331 kB
docs/static/images/multilingual_samples.png ADDED

Git LFS Details

  • SHA256: a69c92a06523a043449d51ae12c214ebf3664e6cb62aaf8098f174a6c6ad088e
  • Pointer size: 132 Bytes
  • Size of remote file: 1.39 MB
docs/static/images/non-text.jpg ADDED

Git LFS Details

  • SHA256: b0d03ec306f1563d4d86763766c7da941b01face1b125b17e4f310132b7e13bc
  • Pointer size: 132 Bytes
  • Size of remote file: 3.3 MB
docs/static/images/self_custom.jpg ADDED

Git LFS Details

  • SHA256: c53fa86a20ec9824d1bc70646f546e1c6d2f92b7a7c944b3b8458ebe67c2737e
  • Pointer size: 132 Bytes
  • Size of remote file: 3.63 MB
docs/static/images/teaser.jpg ADDED

Git LFS Details

  • SHA256: f877b2fbdf16361e2fb2772f1118bd22f4fb9519dddf4ecfed9926df0bf04985
  • Pointer size: 132 Bytes
  • Size of remote file: 1.38 MB
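
For context on the Git LFS details above: the repository itself stores only a small text pointer for each of these binaries, while the actual file content lives in LFS remote storage. A pointer for application.jpg would look roughly like this; the oid is the SHA256 listed above, but the size value here is illustrative, since the listing only reports a rounded 1.7 MB:

version https://git-lfs.github.com/spec/v1
oid sha256:a316d0216dc5d75d3560f165852de33381e79439027019e3e7d1d8709be85355
size 1700000

Three lines of this form come to 131 bytes for a six-digit byte count and 132 bytes for a seven-digit one, which matches the Pointer size values reported for these images.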
docs/static/js/bulma-carousel.min.js ADDED
@@ -0,0 +1 @@
+ !function(t,e){"object"==typeof exports&&"object"==typeof module?module.exports=e():"function"==typeof define&&define.amd?define([],e):"object"==typeof exports?exports.bulmaCarousel=e():t.bulmaCarousel=e()}("undefined"!=typeof self?self:this,function(){return function(i){var n={};function s(t){if(n[t])return n[t].exports;var e=n[t]={i:t,l:!1,exports:{}};return i[t].call(e.exports,e,e.exports,s),e.l=!0,e.exports}return s.m=i,s.c=n,s.d=function(t,e,i){s.o(t,e)||Object.defineProperty(t,e,{configurable:!1,enumerable:!0,get:i})},s.n=function(t){var e=t&&t.__esModule?function(){return t.default}:function(){return t};return s.d(e,"a",e),e},s.o=function(t,e){return Object.prototype.hasOwnProperty.call(t,e)},s.p="",s(s.s=5)}([function(t,e,i){"use strict";i.d(e,"d",function(){return s}),i.d(e,"e",function(){return r}),i.d(e,"b",function(){return o}),i.d(e,"c",function(){return a}),i.d(e,"a",function(){return l});var n=i(2),s=function(e,t){(t=Array.isArray(t)?t:t.split(" ")).forEach(function(t){e.classList.remove(t)})},r=function(t){return t.getBoundingClientRect().width||t.offsetWidth},o=function(t){return t.getBoundingClientRect().height||t.offsetHeight},a=function(t){var e=1<arguments.length&&void 0!==arguments[1]&&arguments[1],i=t.offsetHeight;if(e){var n=window.getComputedStyle(t);i+=parseInt(n.marginTop)+parseInt(n.marginBottom)}return i},l=function(t,e){if(!e)return window.getComputedStyle(t);if(Object(n.b)(e)){var i="";Object.keys(e).forEach(function(t){i+=t+": "+e[t]+";"}),t.style.cssText+=i}}},function(t,e,i){"use strict";e.a=function(){var t=!1;try{var e=Object.defineProperty({},"passive",{get:function(){t=!0}});window.addEventListener("testPassive",null,e),window.removeEventListener("testPassive",null,e)}catch(t){}return t}},function(t,e,i){"use strict";i.d(e,"a",function(){return s}),i.d(e,"c",function(){return r}),i.d(e,"b",function(){return o});var n="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(t){return typeof t}:function(t){return t&&"function"==typeof Symbol&&t.constructor===Symbol&&t!==Symbol.prototype?"symbol":typeof t},s=function(t){return"function"==typeof t},r=function(t){return"string"==typeof t||!!t&&"object"===(void 0===t?"undefined":n(t))&&"[object String]"===Object.prototype.toString.call(t)},o=function(t){return("function"==typeof t||"object"===(void 0===t?"undefined":n(t))&&!!t)&&!Array.isArray(t)}},function(t,e,i){"use strict";var n=function(){function n(t,e){for(var i=0;i<e.length;i++){var n=e[i];n.enumerable=n.enumerable||!1,n.configurable=!0,"value"in n&&(n.writable=!0),Object.defineProperty(t,n.key,n)}}return function(t,e,i){return e&&n(t.prototype,e),i&&n(t,i),t}}();var s=function(){function e(){var t=0<arguments.length&&void 0!==arguments[0]?arguments[0]:[];!function(t,e){if(!(t instanceof e))throw new TypeError("Cannot call a class as a function")}(this,e),this.events=new Map(t)}return n(e,[{key:"on",value:function(t,e){var i=this;return this.events.set(t,[].concat(function(t){if(Array.isArray(t)){for(var e=0,i=Array(t.length);e<t.length;e++)i[e]=t[e];return i}return Array.from(t)}(this.events.has(t)?this.events.get(t):[]),[e])),function(){return i.events.set(t,i.events.get(t).filter(function(t){return t!==e}))}}},{key:"emit",value:function(t){for(var e=arguments.length,i=Array(1<e?e-1:0),n=1;n<e;n++)i[n-1]=arguments[n];return this.events.has(t)&&this.events.get(t).map(function(t){return t.apply(void 0,i)})}}]),e}();e.a=s},function(t,e,i){"use strict";var n=function(){function n(t,e){for(var i=0;i<e.length;i++){var 
n=e[i];n.enumerable=n.enumerable||!1,n.configurable=!0,"value"in n&&(n.writable=!0),Object.defineProperty(t,n.key,n)}}return function(t,e,i){return e&&n(t.prototype,e),i&&n(t,i),t}}();var s=function(){function s(){var t=0<arguments.length&&void 0!==arguments[0]?arguments[0]:0,e=1<arguments.length&&void 0!==arguments[1]?arguments[1]:0;!function(t,e){if(!(t instanceof e))throw new TypeError("Cannot call a class as a function")}(this,s),this._x=t,this._y=e}return n(s,[{key:"add",value:function(t){return new s(this._x+t._x,this._y+t._y)}},{key:"sub",value:function(t){return new s(this._x-t._x,this._y-t._y)}},{key:"distance",value:function(t){var e=this._x-t._x,i=this._y-t._y;return Math.sqrt(Math.pow(e,2)+Math.pow(i,2))}},{key:"max",value:function(t){return new s(Math.max(this._x,t._x),Math.max(this._y,t._y))}},{key:"equals",value:function(t){return this==t||!(!t||null==t)&&(this._x==t._x&&this._y==t._y)}},{key:"inside",value:function(t,e){return this._x>=t._x&&this._x<=e._x&&this._y>=t._y&&this._y<=e._y}},{key:"constrain",value:function(t,e){if(t._x>e._x||t._y>e._y)return this;var i=this._x,n=this._y;return null!==t._x&&(i=Math.max(i,t._x)),null!==e._x&&(i=Math.min(i,e._x)),null!==t._y&&(n=Math.max(n,t._y)),null!==e._y&&(n=Math.min(n,e._y)),new s(i,n)}},{key:"reposition",value:function(t){t.style.top=this._y+"px",t.style.left=this._x+"px"}},{key:"toString",value:function(){return"("+this._x+","+this._y+")"}},{key:"x",get:function(){return this._x},set:function(){var t=0<arguments.length&&void 0!==arguments[0]?arguments[0]:0;return this._x=t,this}},{key:"y",get:function(){return this._y},set:function(){var t=0<arguments.length&&void 0!==arguments[0]?arguments[0]:0;return this._y=t,this}}]),s}();e.a=s},function(t,e,i){"use strict";Object.defineProperty(e,"__esModule",{value:!0});var n=i(6),s=i(0),a=i(2),r=i(3),l=i(7),h=i(9),u=i(10),d=i(11),c=i(13),f=i(15),p=i(18),v=i(19),y=i(22),_=i(23),g=i(24),b=Object.assign||function(t){for(var e=1;e<arguments.length;e++){var i=arguments[e];for(var n in i)Object.prototype.hasOwnProperty.call(i,n)&&(t[n]=i[n])}return t},w=function(){function n(t,e){for(var i=0;i<e.length;i++){var n=e[i];n.enumerable=n.enumerable||!1,n.configurable=!0,"value"in n&&(n.writable=!0),Object.defineProperty(t,n.key,n)}}return function(t,e,i){return e&&n(t.prototype,e),i&&n(t,i),t}}();var o=function(t){function o(t){var e=1<arguments.length&&void 0!==arguments[1]?arguments[1]:{};!function(t,e){if(!(t instanceof e))throw new TypeError("Cannot call a class as a function")}(this,o);var r=function(t,e){if(!t)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!e||"object"!=typeof e&&"function"!=typeof e?t:e}(this,(o.__proto__||Object.getPrototypeOf(o)).call(this));if(r.element=Object(a.c)(t)?document.querySelector(t):t,!r.element)throw new Error("An invalid selector or non-DOM node has been provided.");r._clickEvents=["click","touch"];var i=r.element.dataset?Object.keys(r.element.dataset).filter(function(t){return Object.keys(y.a).includes(t)}).reduce(function(t,e){return b({},t,(i={},n=e,s=r.element.dataset[e],n in i?Object.defineProperty(i,n,{value:s,enumerable:!0,configurable:!0,writable:!0}):i[n]=s,i));var i,n,s},{}):{};return r.options=b({},y.a,e,i),r._id=Object(n.a)("slider"),r.onShow=r.onShow.bind(r),r._init(),r}return function(t,e){if("function"!=typeof e&&null!==e)throw new TypeError("Super expression must either be null or a function, not "+typeof 
e);t.prototype=Object.create(e&&e.prototype,{constructor:{value:t,enumerable:!1,writable:!0,configurable:!0}}),e&&(Object.setPrototypeOf?Object.setPrototypeOf(t,e):t.__proto__=e)}(o,r["a"]),w(o,[{key:"_init",value:function(){return this._items=Array.from(this.element.children),this._breakpoint=new h.a(this),this._autoplay=new l.a(this),this._navigation=new c.a(this),this._pagination=new f.a(this),this._infinite=new u.a(this),this._loop=new d.a(this),this._swipe=new p.a(this),this._build(),Object(a.a)(this.options.onReady)&&this.options.onReady(this),this}},{key:"_build",value:function(){var i=this;this.node=document.createRange().createContextualFragment(Object(_.a)(this.id)),this._ui={wrapper:this.node.firstChild,container:this.node.querySelector(".slider-container")},this.element.appendChild(this.node),this._ui.wrapper.classList.add("is-loading"),this._ui.container.style.opacity=0,this._transitioner=new v.a(this),this._slides=this._items.map(function(t,e){return i._createSlide(t,e)}),this.reset(),this._bindEvents(),this._ui.container.style.opacity=1,this._ui.wrapper.classList.remove("is-loading")}},{key:"_bindEvents",value:function(){this.on("show",this.onShow)}},{key:"_unbindEvents",value:function(){this.off("show",this.onShow)}},{key:"_createSlide",value:function(t,e){var i=document.createRange().createContextualFragment(Object(g.a)()).firstChild;return i.dataset.sliderIndex=e,i.appendChild(t),i}},{key:"_setDimensions",value:function(){var e=this;this.options.vertical?(this._ui.wrapper.style.height=Object(s.c)(this._slides[0])*this.slidesToShow,this.options.centerMode&&(this._ui.wrapper.style.padding=this.options.centerPadding+" 0px")):this.options.centerMode&&(this._ui.wrapper.style.padding="0px "+this.options.centerPadding),this._wrapperWidth=Object(s.e)(this._ui.wrapper),this._wrapperHeight=Object(s.c)(this._ui.wrapper),this.options.vertical?(this._slideWidth=Math.ceil(this._wrapperWidth),this._containerHeight=Math.ceil(Object(s.c)(this._slides[0])*this._slides.length),this._ui.container.style.height=this._containerHeight+"px"):(this._slideWidth=Math.ceil(this._wrapperWidth/this.slidesToShow),this._containerWidth=Math.ceil(this._slideWidth*this._slides.length),this._ui.container.style.width=this._containerWidth+"px"),this._slides.forEach(function(t){t.style.width=e._slideWidth+"px"})}},{key:"_setHeight",value:function(){"translate"!==this.options.effect&&(this._ui.container.style.height=Object(s.c)(this._slides[this.state.index])+"px")}},{key:"_setClasses",value:function(){var e=this;this._slides.forEach(function(t){Object(s.d)(t,"is-active is-current is-slide-previous 
is-slide-next"),Math.abs((e.state.index-1)%e.state.length)===parseInt(t.dataset.sliderIndex,10)&&t.classList.add("is-slide-previous"),Math.abs(e.state.index%e.state.length)===parseInt(t.dataset.sliderIndex,10)&&t.classList.add("is-current"),Math.abs((e.state.index+1)%e.state.length)===parseInt(t.dataset.sliderIndex,10)&&t.classList.add("is-slide-next")})}},{key:"onShow",value:function(t){this._navigation.refresh(),this._pagination.refresh(),this._setClasses()}},{key:"next",value:function(){!this.options.loop&&!this.options.infinite&&this.state.index+this.slidesToScroll>this.state.length-this.slidesToShow&&!this.options.centerMode?this.state.next=this.state.index:this.state.next=this.state.index+this.slidesToScroll,this.show()}},{key:"previous",value:function(){this.options.loop||this.options.infinite||0!==this.state.index?this.state.next=this.state.index-this.slidesToScroll:this.state.next=this.state.index,this.show()}},{key:"start",value:function(){this._autoplay.start()}},{key:"pause",value:function(){this._autoplay.pause()}},{key:"stop",value:function(){this._autoplay.stop()}},{key:"show",value:function(t){var e=1<arguments.length&&void 0!==arguments[1]&&arguments[1];!this.state.length||this.state.length<=this.slidesToShow||("Number"==typeof t&&(this.state.next=t),this.options.loop&&this._loop.apply(),this.options.infinite&&this._infinite.apply(),this.state.index!==this.state.next&&(this.emit("before:show",this.state),this._transitioner.apply(e,this._setHeight.bind(this)),this.emit("after:show",this.state),this.emit("show",this)))}},{key:"reset",value:function(){var e=this;this.state={length:this._items.length,index:Math.abs(this.options.initialSlide),next:Math.abs(this.options.initialSlide),prev:void 0},this.options.loop&&this.options.infinite&&(this.options.loop=!1),this.options.slidesToScroll>this.options.slidesToShow&&(this.options.slidesToScroll=this.slidesToShow),this._breakpoint.init(),this.state.index>=this.state.length&&0!==this.state.index&&(this.state.index=this.state.index-this.slidesToScroll),this.state.length<=this.slidesToShow&&(this.state.index=0),this._ui.wrapper.appendChild(this._navigation.init().render()),this._ui.wrapper.appendChild(this._pagination.init().render()),this.options.navigationSwipe?this._swipe.bindEvents():this._swipe._bindEvents(),this._breakpoint.apply(),this._slides.forEach(function(t){return e._ui.container.appendChild(t)}),this._transitioner.init().apply(!0,this._setHeight.bind(this)),this.options.autoplay&&this._autoplay.init().start()}},{key:"destroy",value:function(){var e=this;this._unbindEvents(),this._items.forEach(function(t){e.element.appendChild(t)}),this.node.remove()}},{key:"id",get:function(){return this._id}},{key:"index",set:function(t){this._index=t},get:function(){return this._index}},{key:"length",set:function(t){this._length=t},get:function(){return this._length}},{key:"slides",get:function(){return this._slides},set:function(t){this._slides=t}},{key:"slidesToScroll",get:function(){return"translate"===this.options.effect?this._breakpoint.getSlidesToScroll():1}},{key:"slidesToShow",get:function(){return"translate"===this.options.effect?this._breakpoint.getSlidesToShow():1}},{key:"direction",get:function(){return"rtl"===this.element.dir.toLowerCase()||"rtl"===this.element.style.direction?"rtl":"ltr"}},{key:"wrapper",get:function(){return this._ui.wrapper}},{key:"wrapperWidth",get:function(){return this._wrapperWidth||0}},{key:"container",get:function(){return this._ui.container}},{key:"containerWidth",get:function(){return 
this._containerWidth||0}},{key:"slideWidth",get:function(){return this._slideWidth||0}},{key:"transitioner",get:function(){return this._transitioner}}],[{key:"attach",value:function(){var i=this,t=0<arguments.length&&void 0!==arguments[0]?arguments[0]:".slider",n=1<arguments.length&&void 0!==arguments[1]?arguments[1]:{},s=new Array,e=Object(a.c)(t)?document.querySelectorAll(t):Array.isArray(t)?t:[t];return[].forEach.call(e,function(t){if(void 0===t[i.constructor.name]){var e=new o(t,n);t[i.constructor.name]=e,s.push(e)}else s.push(t[i.constructor.name])}),s}}]),o}();e.default=o},function(t,e,i){"use strict";i.d(e,"a",function(){return n});var n=function(){return(0<arguments.length&&void 0!==arguments[0]?arguments[0]:"")+([1e7]+-1e3+-4e3+-8e3+-1e11).replace(/[018]/g,function(t){return(t^crypto.getRandomValues(new Uint8Array(1))[0]&15>>t/4).toString(16)})}},function(t,e,i){"use strict";var n=i(3),s=i(8),r=function(){function n(t,e){for(var i=0;i<e.length;i++){var n=e[i];n.enumerable=n.enumerable||!1,n.configurable=!0,"value"in n&&(n.writable=!0),Object.defineProperty(t,n.key,n)}}return function(t,e,i){return e&&n(t.prototype,e),i&&n(t,i),t}}();Symbol("onVisibilityChange"),Symbol("onMouseEnter"),Symbol("onMouseLeave");var o=function(t){function i(t){!function(t,e){if(!(t instanceof e))throw new TypeError("Cannot call a class as a function")}(this,i);var e=function(t,e){if(!t)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!e||"object"!=typeof e&&"function"!=typeof e?t:e}(this,(i.__proto__||Object.getPrototypeOf(i)).call(this));return e.slider=t,e.onVisibilityChange=e.onVisibilityChange.bind(e),e.onMouseEnter=e.onMouseEnter.bind(e),e.onMouseLeave=e.onMouseLeave.bind(e),e}return function(t,e){if("function"!=typeof e&&null!==e)throw new TypeError("Super expression must either be null or a function, not "+typeof e);t.prototype=Object.create(e&&e.prototype,{constructor:{value:t,enumerable:!1,writable:!0,configurable:!0}}),e&&(Object.setPrototypeOf?Object.setPrototypeOf(t,e):t.__proto__=e)}(i,n["a"]),r(i,[{key:"init",value:function(){return this._bindEvents(),this}},{key:"_bindEvents",value:function(){document.addEventListener("visibilitychange",this.onVisibilityChange),this.slider.options.pauseOnHover&&(this.slider.container.addEventListener(s.a,this.onMouseEnter),this.slider.container.addEventListener(s.b,this.onMouseLeave))}},{key:"_unbindEvents",value:function(){document.removeEventListener("visibilitychange",this.onVisibilityChange),this.slider.container.removeEventListener(s.a,this.onMouseEnter),this.slider.container.removeEventListener(s.b,this.onMouseLeave)}},{key:"start",value:function(){var t=this;this.stop(),this.slider.options.autoplay&&(this.emit("start",this),this._interval=setInterval(function(){t._hovering&&t.slider.options.pauseOnHover||(!t.slider.options.centerMode&&t.slider.state.next>=t.slider.state.length-t.slider.slidesToShow&&!t.slider.options.loop&&!t.slider.options.infinite?t.stop():t.slider.next())},this.slider.options.autoplaySpeed))}},{key:"stop",value:function(){this._interval=clearInterval(this._interval),this.emit("stop",this)}},{key:"pause",value:function(){var t=this,e=0<arguments.length&&void 
0!==arguments[0]?arguments[0]:0;this.paused||(this.timer&&this.stop(),this.paused=!0,0===e?(this.paused=!1,this.start()):this.slider.on("transition:end",function(){t&&(t.paused=!1,t.run?t.start():t.stop())}))}},{key:"onVisibilityChange",value:function(t){document.hidden?this.stop():this.start()}},{key:"onMouseEnter",value:function(t){this._hovering=!0,this.slider.options.pauseOnHover&&this.pause()}},{key:"onMouseLeave",value:function(t){this._hovering=!1,this.slider.options.pauseOnHover&&this.pause()}}]),i}();e.a=o},function(t,e,i){"use strict";i.d(e,"a",function(){return o}),i.d(e,"b",function(){return a});window.navigator.pointerEnabled||window.navigator.msPointerEnabled,window.navigator.msPointerEnabled&&1<window.navigator.msMaxTouchPoints||window.navigator.pointerEnabled&&window.navigator.maxTouchPoints,navigator.userAgent.match(/(Android);?[\s\/]+([\d.]+)?/),navigator.userAgent.match(/(iPad).*OS\s([\d_]+)/),navigator.userAgent.match(/(iPod)(.*OS\s([\d_]+))?/),!navigator.userAgent.match(/(iPad).*OS\s([\d_]+)/)&&navigator.userAgent.match(/(iPhone\sOS)\s([\d_]+)/),0<=navigator.userAgent.toLowerCase().indexOf("safari")&&navigator.userAgent.toLowerCase().indexOf("chrome")<0&&navigator.userAgent.toLowerCase().indexOf("android"),/(iPhone|iPod|iPad).*AppleWebKit(?!.*Safari)/i.test(navigator.userAgent);var n=!!("ontouchstart"in window),s=!!("PointerEvent"in window),r=n||window.DocumentTouch&&document instanceof DocumentTouch||navigator.maxTouchPoints,o=r&&s?"pointerenter":"mouseenter",a=r&&s?"pointerleave":"mouseleave"},function(t,e,i){"use strict";var n=function(){function n(t,e){for(var i=0;i<e.length;i++){var n=e[i];n.enumerable=n.enumerable||!1,n.configurable=!0,"value"in n&&(n.writable=!0),Object.defineProperty(t,n.key,n)}}return function(t,e,i){return e&&n(t.prototype,e),i&&n(t,i),t}}();var s=Symbol("onResize"),r=function(){function e(t){!function(t,e){if(!(t instanceof e))throw new TypeError("Cannot call a class as a function")}(this,e),this.slider=t,this.options=t.options,this[s]=this[s].bind(this),this._bindEvents()}return n(e,[{key:"init",value:function(){return this._defaultBreakpoint={slidesToShow:this.options.slidesToShow,slidesToScroll:this.options.slidesToScroll},this.options.breakpoints.sort(function(t,e){return parseInt(t.changePoint,10)>parseInt(e.changePoint,10)}),this._currentBreakpoint=this._getActiveBreakpoint(),this}},{key:"destroy",value:function(){this._unbindEvents()}},{key:"_bindEvents",value:function(){window.addEventListener("resize",this[s]),window.addEventListener("orientationchange",this[s])}},{key:"_unbindEvents",value:function(){window.removeEventListener("resize",this[s]),window.removeEventListener("orientationchange",this[s])}},{key:"_getActiveBreakpoint",value:function(){var t=!0,e=!1,i=void 0;try{for(var n,s=this.options.breakpoints[Symbol.iterator]();!(t=(n=s.next()).done);t=!0){var r=n.value;if(r.changePoint>=window.innerWidth)return r}}catch(t){e=!0,i=t}finally{try{!t&&s.return&&s.return()}finally{if(e)throw i}}return this._defaultBreakpoint}},{key:"getSlidesToShow",value:function(){return this._currentBreakpoint?this._currentBreakpoint.slidesToShow:this._defaultBreakpoint.slidesToShow}},{key:"getSlidesToScroll",value:function(){return 
this._currentBreakpoint?this._currentBreakpoint.slidesToScroll:this._defaultBreakpoint.slidesToScroll}},{key:"apply",value:function(){this.slider.state.index>=this.slider.state.length&&0!==this.slider.state.index&&(this.slider.state.index=this.slider.state.index-this._currentBreakpoint.slidesToScroll),this.slider.state.length<=this._currentBreakpoint.slidesToShow&&(this.slider.state.index=0),this.options.loop&&this.slider._loop.init().apply(),this.options.infinite&&this.slider._infinite.init().apply(),this.slider._setDimensions(),this.slider._transitioner.init().apply(!0,this.slider._setHeight.bind(this.slider)),this.slider._setClasses(),this.slider._navigation.refresh(),this.slider._pagination.refresh()}},{key:s,value:function(t){var e=this._getActiveBreakpoint();e.slidesToShow!==this._currentBreakpoint.slidesToShow&&(this._currentBreakpoint=e,this.apply())}}]),e}();e.a=r},function(t,e,i){"use strict";var n=function(){function n(t,e){for(var i=0;i<e.length;i++){var n=e[i];n.enumerable=n.enumerable||!1,n.configurable=!0,"value"in n&&(n.writable=!0),Object.defineProperty(t,n.key,n)}}return function(t,e,i){return e&&n(t.prototype,e),i&&n(t,i),t}}();var s=function(){function e(t){!function(t,e){if(!(t instanceof e))throw new TypeError("Cannot call a class as a function")}(this,e),this.slider=t}return n(e,[{key:"init",value:function(){if(this.slider.options.infinite&&"translate"===this.slider.options.effect){this.slider.options.centerMode?this._infiniteCount=Math.ceil(this.slider.slidesToShow+this.slider.slidesToShow/2):this._infiniteCount=this.slider.slidesToShow;for(var t=[],e=0,i=this.slider.state.length;i>this.slider.state.length-1-this._infiniteCount;i-=1)e=i-1,t.unshift(this._cloneSlide(this.slider.slides[e],e-this.slider.state.length));for(var n=[],s=0;s<this._infiniteCount+this.slider.state.length;s+=1)n.push(this._cloneSlide(this.slider.slides[s%this.slider.state.length],s+this.slider.state.length));this.slider.slides=[].concat(t,function(t){if(Array.isArray(t)){for(var e=0,i=Array(t.length);e<t.length;e++)i[e]=t[e];return i}return Array.from(t)}(this.slider.slides),n)}return this}},{key:"apply",value:function(){}},{key:"onTransitionEnd",value:function(t){this.slider.options.infinite&&(this.slider.state.next>=this.slider.state.length?(this.slider.state.index=this.slider.state.next=this.slider.state.next-this.slider.state.length,this.slider.transitioner.apply(!0)):this.slider.state.next<0&&(this.slider.state.index=this.slider.state.next=this.slider.state.length+this.slider.state.next,this.slider.transitioner.apply(!0)))}},{key:"_cloneSlide",value:function(t,e){var i=t.cloneNode(!0);return i.dataset.sliderIndex=e,i.dataset.cloned=!0,(i.querySelectorAll("[id]")||[]).forEach(function(t){t.setAttribute("id","")}),i}}]),e}();e.a=s},function(t,e,i){"use strict";var n=i(12),s=function(){function n(t,e){for(var i=0;i<e.length;i++){var n=e[i];n.enumerable=n.enumerable||!1,n.configurable=!0,"value"in n&&(n.writable=!0),Object.defineProperty(t,n.key,n)}}return function(t,e,i){return e&&n(t.prototype,e),i&&n(t,i),t}}();var r=function(){function e(t){!function(t,e){if(!(t instanceof e))throw new TypeError("Cannot call a class as a function")}(this,e),this.slider=t}return s(e,[{key:"init",value:function(){return 
this}},{key:"apply",value:function(){this.slider.options.loop&&(0<this.slider.state.next?this.slider.state.next<this.slider.state.length?this.slider.state.next>this.slider.state.length-this.slider.slidesToShow&&Object(n.a)(this.slider._slides[this.slider.state.length-1],this.slider.wrapper)?this.slider.state.next=0:this.slider.state.next=Math.min(Math.max(this.slider.state.next,0),this.slider.state.length-this.slider.slidesToShow):this.slider.state.next=0:this.slider.state.next<=0-this.slider.slidesToScroll?this.slider.state.next=this.slider.state.length-this.slider.slidesToShow:this.slider.state.next=0)}}]),e}();e.a=r},function(t,e,i){"use strict";i.d(e,"a",function(){return n});var n=function(t,e){var i=t.getBoundingClientRect();return e=e||document.documentElement,0<=i.top&&0<=i.left&&i.bottom<=(window.innerHeight||e.clientHeight)&&i.right<=(window.innerWidth||e.clientWidth)}},function(t,e,i){"use strict";var n=i(14),s=i(1),r=function(){function n(t,e){for(var i=0;i<e.length;i++){var n=e[i];n.enumerable=n.enumerable||!1,n.configurable=!0,"value"in n&&(n.writable=!0),Object.defineProperty(t,n.key,n)}}return function(t,e,i){return e&&n(t.prototype,e),i&&n(t,i),t}}();var o=function(){function e(t){!function(t,e){if(!(t instanceof e))throw new TypeError("Cannot call a class as a function")}(this,e),this.slider=t,this._clickEvents=["click","touch"],this._supportsPassive=Object(s.a)(),this.onPreviousClick=this.onPreviousClick.bind(this),this.onNextClick=this.onNextClick.bind(this),this.onKeyUp=this.onKeyUp.bind(this)}return r(e,[{key:"init",value:function(){return this.node=document.createRange().createContextualFragment(Object(n.a)(this.slider.options.icons)),this._ui={previous:this.node.querySelector(".slider-navigation-previous"),next:this.node.querySelector(".slider-navigation-next")},this._unbindEvents(),this._bindEvents(),this.refresh(),this}},{key:"destroy",value:function(){this._unbindEvents()}},{key:"_bindEvents",value:function(){var e=this;this.slider.wrapper.addEventListener("keyup",this.onKeyUp),this._clickEvents.forEach(function(t){e._ui.previous.addEventListener(t,e.onPreviousClick),e._ui.next.addEventListener(t,e.onNextClick)})}},{key:"_unbindEvents",value:function(){var 
e=this;this.slider.wrapper.removeEventListener("keyup",this.onKeyUp),this._clickEvents.forEach(function(t){e._ui.previous.removeEventListener(t,e.onPreviousClick),e._ui.next.removeEventListener(t,e.onNextClick)})}},{key:"onNextClick",value:function(t){this._supportsPassive||t.preventDefault(),this.slider.options.navigation&&this.slider.next()}},{key:"onPreviousClick",value:function(t){this._supportsPassive||t.preventDefault(),this.slider.options.navigation&&this.slider.previous()}},{key:"onKeyUp",value:function(t){this.slider.options.keyNavigation&&("ArrowRight"===t.key||"Right"===t.key?this.slider.next():"ArrowLeft"!==t.key&&"Left"!==t.key||this.slider.previous())}},{key:"refresh",value:function(){this.slider.options.loop||this.slider.options.infinite||(this.slider.options.navigation&&this.slider.state.length>this.slider.slidesToShow?(this._ui.previous.classList.remove("is-hidden"),this._ui.next.classList.remove("is-hidden"),0===this.slider.state.next?(this._ui.previous.classList.add("is-hidden"),this._ui.next.classList.remove("is-hidden")):this.slider.state.next>=this.slider.state.length-this.slider.slidesToShow&&!this.slider.options.centerMode?(this._ui.previous.classList.remove("is-hidden"),this._ui.next.classList.add("is-hidden")):this.slider.state.next>=this.slider.state.length-1&&this.slider.options.centerMode&&(this._ui.previous.classList.remove("is-hidden"),this._ui.next.classList.add("is-hidden"))):(this._ui.previous.classList.add("is-hidden"),this._ui.next.classList.add("is-hidden")))}},{key:"render",value:function(){return this.node}}]),e}();e.a=o},function(t,e,i){"use strict";e.a=function(t){return'<div class="slider-navigation-previous">'+t.previous+'</div>\n<div class="slider-navigation-next">'+t.next+"</div>"}},function(t,e,i){"use strict";var n=i(16),s=i(17),r=i(1),o=function(){function n(t,e){for(var i=0;i<e.length;i++){var n=e[i];n.enumerable=n.enumerable||!1,n.configurable=!0,"value"in n&&(n.writable=!0),Object.defineProperty(t,n.key,n)}}return function(t,e,i){return e&&n(t.prototype,e),i&&n(t,i),t}}();var a=function(){function e(t){!function(t,e){if(!(t instanceof e))throw new TypeError("Cannot call a class as a function")}(this,e),this.slider=t,this._clickEvents=["click","touch"],this._supportsPassive=Object(r.a)(),this.onPageClick=this.onPageClick.bind(this),this.onResize=this.onResize.bind(this)}return o(e,[{key:"init",value:function(){return this._pages=[],this.node=document.createRange().createContextualFragment(Object(n.a)()),this._ui={container:this.node.firstChild},this._count=Math.ceil((this.slider.state.length-this.slider.slidesToShow)/this.slider.slidesToScroll),this._draw(),this.refresh(),this}},{key:"destroy",value:function(){this._unbindEvents()}},{key:"_bindEvents",value:function(){var i=this;window.addEventListener("resize",this.onResize),window.addEventListener("orientationchange",this.onResize),this._clickEvents.forEach(function(e){i._pages.forEach(function(t){return t.addEventListener(e,i.onPageClick)})})}},{key:"_unbindEvents",value:function(){var i=this;window.removeEventListener("resize",this.onResize),window.removeEventListener("orientationchange",this.onResize),this._clickEvents.forEach(function(e){i._pages.forEach(function(t){return t.removeEventListener(e,i.onPageClick)})})}},{key:"_draw",value:function(){if(this._ui.container.innerHTML="",this.slider.options.pagination&&this.slider.state.length>this.slider.slidesToShow){for(var t=0;t<=this._count;t++){var 
e=document.createRange().createContextualFragment(Object(s.a)()).firstChild;e.dataset.index=t*this.slider.slidesToScroll,this._pages.push(e),this._ui.container.appendChild(e)}this._bindEvents()}}},{key:"onPageClick",value:function(t){this._supportsPassive||t.preventDefault(),this.slider.state.next=t.currentTarget.dataset.index,this.slider.show()}},{key:"onResize",value:function(){this._draw()}},{key:"refresh",value:function(){var e=this,t=void 0;(t=this.slider.options.infinite?Math.ceil(this.slider.state.length-1/this.slider.slidesToScroll):Math.ceil((this.slider.state.length-this.slider.slidesToShow)/this.slider.slidesToScroll))!==this._count&&(this._count=t,this._draw()),this._pages.forEach(function(t){t.classList.remove("is-active"),parseInt(t.dataset.index,10)===e.slider.state.next%e.slider.state.length&&t.classList.add("is-active")})}},{key:"render",value:function(){return this.node}}]),e}();e.a=a},function(t,e,i){"use strict";e.a=function(){return'<div class="slider-pagination"></div>'}},function(t,e,i){"use strict";e.a=function(){return'<div class="slider-page"></div>'}},function(t,e,i){"use strict";var n=i(4),s=i(1),r=function(){function n(t,e){for(var i=0;i<e.length;i++){var n=e[i];n.enumerable=n.enumerable||!1,n.configurable=!0,"value"in n&&(n.writable=!0),Object.defineProperty(t,n.key,n)}}return function(t,e,i){return e&&n(t.prototype,e),i&&n(t,i),t}}();var o=function(){function e(t){!function(t,e){if(!(t instanceof e))throw new TypeError("Cannot call a class as a function")}(this,e),this.slider=t,this._supportsPassive=Object(s.a)(),this.onStartDrag=this.onStartDrag.bind(this),this.onMoveDrag=this.onMoveDrag.bind(this),this.onStopDrag=this.onStopDrag.bind(this),this._init()}return r(e,[{key:"_init",value:function(){}},{key:"bindEvents",value:function(){var e=this;this.slider.container.addEventListener("dragstart",function(t){e._supportsPassive||t.preventDefault()}),this.slider.container.addEventListener("mousedown",this.onStartDrag),this.slider.container.addEventListener("touchstart",this.onStartDrag),window.addEventListener("mousemove",this.onMoveDrag),window.addEventListener("touchmove",this.onMoveDrag),window.addEventListener("mouseup",this.onStopDrag),window.addEventListener("touchend",this.onStopDrag),window.addEventListener("touchcancel",this.onStopDrag)}},{key:"unbindEvents",value:function(){var e=this;this.slider.container.removeEventListener("dragstart",function(t){e._supportsPassive||t.preventDefault()}),this.slider.container.removeEventListener("mousedown",this.onStartDrag),this.slider.container.removeEventListener("touchstart",this.onStartDrag),window.removeEventListener("mousemove",this.onMoveDrag),window.removeEventListener("touchmove",this.onMoveDrag),window.removeEventListener("mouseup",this.onStopDrag),window.removeEventListener("mouseup",this.onStopDrag),window.removeEventListener("touchcancel",this.onStopDrag)}},{key:"onStartDrag",value:function(t){if(t.touches){if(1<t.touches.length)return;t=t.touches[0]}this._origin=new n.a(t.screenX,t.screenY),this.width=this.slider.wrapperWidth,this.slider.transitioner.disable()}},{key:"onMoveDrag",value:function(t){if(this._origin){var e=t.touches?t.touches[0]:t;this._lastTranslate=new 
n.a(e.screenX-this._origin.x,e.screenY-this._origin.y),t.touches&&Math.abs(this._lastTranslate.x)>Math.abs(this._lastTranslate.y)&&(this._supportsPassive||t.preventDefault(),t.stopPropagation())}}},{key:"onStopDrag",value:function(t){this._origin&&this._lastTranslate&&(Math.abs(this._lastTranslate.x)>.2*this.width?this._lastTranslate.x<0?this.slider.next():this.slider.previous():this.slider.show(!0)),this._origin=null,this._lastTranslate=null}}]),e}();e.a=o},function(t,e,i){"use strict";var n=i(20),s=i(21),r=function(){function n(t,e){for(var i=0;i<e.length;i++){var n=e[i];n.enumerable=n.enumerable||!1,n.configurable=!0,"value"in n&&(n.writable=!0),Object.defineProperty(t,n.key,n)}}return function(t,e,i){return e&&n(t.prototype,e),i&&n(t,i),t}}();var o=function(){function e(t){!function(t,e){if(!(t instanceof e))throw new TypeError("Cannot call a class as a function")}(this,e),this.slider=t,this.options=t.options,this._animating=!1,this._animation=void 0,this._translate=new s.a(this,t,t.options),this._fade=new n.a(this,t,t.options)}return r(e,[{key:"init",value:function(){return this._fade.init(),this._translate.init(),this}},{key:"isAnimating",value:function(){return this._animating}},{key:"enable",value:function(){this._animation&&this._animation.enable()}},{key:"disable",value:function(){this._animation&&this._animation.disable()}},{key:"apply",value:function(t,e){if(!this._animating||t){switch(this.options.effect){case"fade":this._animation=this._fade;break;case"translate":default:this._animation=this._translate}this._animationCallback=e,t?this._animation&&this._animation.disable():(this._animation&&this._animation.enable(),this._animating=!0),this._animation&&this._animation.apply(),t&&this.end()}}},{key:"end",value:function(){this._animating=!1,this._animation=void 0,this.slider.state.index=this.slider.state.next,this._animationCallback&&this._animationCallback()}}]),e}();e.a=o},function(t,e,i){"use strict";var s=i(0),r=Object.assign||function(t){for(var e=1;e<arguments.length;e++){var i=arguments[e];for(var n in i)Object.prototype.hasOwnProperty.call(i,n)&&(t[n]=i[n])}return t},o=function(){function n(t,e){for(var i=0;i<e.length;i++){var n=e[i];n.enumerable=n.enumerable||!1,n.configurable=!0,"value"in n&&(n.writable=!0),Object.defineProperty(t,n.key,n)}}return function(t,e,i){return e&&n(t.prototype,e),i&&n(t,i),t}}();var n=function(){function n(t,e){var i=2<arguments.length&&void 0!==arguments[2]?arguments[2]:{};!function(t,e){if(!(t instanceof e))throw new TypeError("Cannot call a class as a function")}(this,n),this.transitioner=t,this.slider=e,this.options=r({},i)}return o(n,[{key:"init",value:function(){var i=this;return"fade"===this.options.effect&&this.slider.slides.forEach(function(t,e){Object(s.a)(t,{position:"absolute",left:0,top:0,bottom:0,"z-index":t.dataset.sliderIndex==i.slider.state.index?0:-2,opacity:t.dataset.sliderIndex==i.slider.state.index?1:0})}),this}},{key:"enable",value:function(){var e=this;this._oldSlide=this.slider.slides.filter(function(t){return t.dataset.sliderIndex==e.slider.state.index})[0],this._newSlide=this.slider.slides.filter(function(t){return t.dataset.sliderIndex==e.slider.state.next})[0],this._newSlide&&(this._newSlide.addEventListener("transitionend",this.onTransitionEnd.bind(this)),this._newSlide.style.transition=this.options.duration+"ms "+this.options.timing,this._oldSlide&&(this._oldSlide.addEventListener("transitionend",this.onTransitionEnd.bind(this)),this._oldSlide.style.transition=this.options.duration+"ms 
"+this.options.timing))}},{key:"disable",value:function(){var e=this;this._oldSlide=this.slider.slides.filter(function(t){return t.dataset.sliderIndex==e.slider.state.index})[0],this._newSlide=this.slider.slides.filter(function(t){return t.dataset.sliderIndex==e.slider.state.next})[0],this._newSlide&&(this._newSlide.removeEventListener("transitionend",this.onTransitionEnd.bind(this)),this._newSlide.style.transition="none",this._oldSlide&&(this._oldSlide.removeEventListener("transitionend",this.onTransitionEnd.bind(this)),this._oldSlide.style.transition="none"))}},{key:"apply",value:function(t){var e=this;this._oldSlide=this.slider.slides.filter(function(t){return t.dataset.sliderIndex==e.slider.state.index})[0],this._newSlide=this.slider.slides.filter(function(t){return t.dataset.sliderIndex==e.slider.state.next})[0],this._oldSlide&&this._newSlide&&(Object(s.a)(this._oldSlide,{opacity:0}),Object(s.a)(this._newSlide,{opacity:1,"z-index":t?0:-1}))}},{key:"onTransitionEnd",value:function(t){"fade"===this.options.effect&&(this.transitioner.isAnimating()&&t.target==this._newSlide&&(this._newSlide&&(Object(s.a)(this._newSlide,{"z-index":0}),this._newSlide.removeEventListener("transitionend",this.onTransitionEnd.bind(this))),this._oldSlide&&(Object(s.a)(this._oldSlide,{"z-index":-2}),this._oldSlide.removeEventListener("transitionend",this.onTransitionEnd.bind(this)))),this.transitioner.end())}}]),n}();e.a=n},function(t,e,i){"use strict";var r=i(4),o=i(0),s=Object.assign||function(t){for(var e=1;e<arguments.length;e++){var i=arguments[e];for(var n in i)Object.prototype.hasOwnProperty.call(i,n)&&(t[n]=i[n])}return t},a=function(){function n(t,e){for(var i=0;i<e.length;i++){var n=e[i];n.enumerable=n.enumerable||!1,n.configurable=!0,"value"in n&&(n.writable=!0),Object.defineProperty(t,n.key,n)}}return function(t,e,i){return e&&n(t.prototype,e),i&&n(t,i),t}}();var n=function(){function n(t,e){var i=2<arguments.length&&void 0!==arguments[2]?arguments[2]:{};!function(t,e){if(!(t instanceof e))throw new TypeError("Cannot call a class as a function")}(this,n),this.transitioner=t,this.slider=e,this.options=s({},i),this.onTransitionEnd=this.onTransitionEnd.bind(this)}return a(n,[{key:"init",value:function(){return this._position=new r.a(this.slider.container.offsetLeft,this.slider.container.offsetTop),this._bindEvents(),this}},{key:"destroy",value:function(){this._unbindEvents()}},{key:"_bindEvents",value:function(){this.slider.container.addEventListener("transitionend",this.onTransitionEnd)}},{key:"_unbindEvents",value:function(){this.slider.container.removeEventListener("transitionend",this.onTransitionEnd)}},{key:"enable",value:function(){this.slider.container.style.transition=this.options.duration+"ms "+this.options.timing}},{key:"disable",value:function(){this.slider.container.style.transition="none"}},{key:"apply",value:function(){var e=this,t=void 0;if("translate"===this.options.effect){var i=this.slider.slides.filter(function(t){return t.dataset.sliderIndex==e.slider.state.next})[0],n=new r.a(i.offsetLeft,i.offsetTop);t=this.options.centerMode?new r.a(Math.round(Object(o.e)(this.slider.container)),Math.round(Object(o.b)(this.slider.container))):new r.a(Math.round(Object(o.e)(this.slider.container)-Object(o.e)(this.slider.wrapper)),Math.round(Object(o.b)(this.slider.container)-Object(o.b)(this.slider.wrapper)));var s=new 
r.a(Math.min(Math.max(-1*n.x,-1*t.x),0),Math.min(Math.max(-1*n.y,-1*t.y),0));this.options.loop&&(!this.options.vertical&&Math.abs(this._position.x)>t.x?(s.x=0,this.slider.state.next=0):this.options.vertical&&Math.abs(this._position.y)>t.y&&(s.y=0,this.slider.state.next=0)),this._position.x=s.x,this._position.y=s.y,this.options.centerMode&&(this._position.x=this._position.x+this.slider.wrapperWidth/2-Object(o.e)(i)/2),"rtl"===this.slider.direction&&(this._position.x=-this._position.x,this._position.y=-this._position.y),this.slider.container.style.transform="translate3d("+this._position.x+"px, "+this._position.y+"px, 0)",n.x>t.x&&this.slider.transitioner.end()}}},{key:"onTransitionEnd",value:function(t){"translate"===this.options.effect&&(this.transitioner.isAnimating()&&t.target==this.slider.container&&this.options.infinite&&this.slider._infinite.onTransitionEnd(t),this.transitioner.end())}}]),n}();e.a=n},function(t,e,i){"use strict";e.a={initialSlide:0,slidesToScroll:1,slidesToShow:1,navigation:!0,navigationKeys:!0,navigationSwipe:!0,pagination:!0,loop:!1,infinite:!1,effect:"translate",duration:300,timing:"ease",autoplay:!1,autoplaySpeed:3e3,pauseOnHover:!0,breakpoints:[{changePoint:480,slidesToShow:1,slidesToScroll:1},{changePoint:640,slidesToShow:2,slidesToScroll:2},{changePoint:768,slidesToShow:3,slidesToScroll:3}],onReady:null,icons:{previous:'<svg viewBox="0 0 50 80" xml:space="preserve">\n <polyline fill="currentColor" stroke-width=".5em" stroke-linecap="round" stroke-linejoin="round" points="45.63,75.8 0.375,38.087 45.63,0.375 "/>\n </svg>',next:'<svg viewBox="0 0 50 80" xml:space="preserve">\n <polyline fill="currentColor" stroke-width=".5em" stroke-linecap="round" stroke-linejoin="round" points="0.375,0.375 45.63,38.087 0.375,75.8 "/>\n </svg>'}}},function(t,e,i){"use strict";e.a=function(t){return'<div id="'+t+'" class="slider" tabindex="0">\n <div class="slider-container"></div>\n </div>'}},function(t,e,i){"use strict";e.a=function(){return'<div class="slider-item"></div>'}}]).default});
docs/static/js/bulma-slider.min.js ADDED
@@ -0,0 +1 @@
+ !function(t,e){"object"==typeof exports&&"object"==typeof module?module.exports=e():"function"==typeof define&&define.amd?define([],e):"object"==typeof exports?exports.bulmaSlider=e():t.bulmaSlider=e()}("undefined"!=typeof self?self:this,function(){return function(n){var r={};function i(t){if(r[t])return r[t].exports;var e=r[t]={i:t,l:!1,exports:{}};return n[t].call(e.exports,e,e.exports,i),e.l=!0,e.exports}return i.m=n,i.c=r,i.d=function(t,e,n){i.o(t,e)||Object.defineProperty(t,e,{configurable:!1,enumerable:!0,get:n})},i.n=function(t){var e=t&&t.__esModule?function(){return t.default}:function(){return t};return i.d(e,"a",e),e},i.o=function(t,e){return Object.prototype.hasOwnProperty.call(t,e)},i.p="",i(i.s=0)}([function(t,e,n){"use strict";Object.defineProperty(e,"__esModule",{value:!0}),n.d(e,"isString",function(){return l});var r=n(1),i=Object.assign||function(t){for(var e=1;e<arguments.length;e++){var n=arguments[e];for(var r in n)Object.prototype.hasOwnProperty.call(n,r)&&(t[r]=n[r])}return t},u=function(){function r(t,e){for(var n=0;n<e.length;n++){var r=e[n];r.enumerable=r.enumerable||!1,r.configurable=!0,"value"in r&&(r.writable=!0),Object.defineProperty(t,r.key,r)}}return function(t,e,n){return e&&r(t.prototype,e),n&&r(t,n),t}}(),o="function"==typeof Symbol&&"symbol"==typeof Symbol.iterator?function(t){return typeof t}:function(t){return t&&"function"==typeof Symbol&&t.constructor===Symbol&&t!==Symbol.prototype?"symbol":typeof t};var l=function(t){return"string"==typeof t||!!t&&"object"===(void 0===t?"undefined":o(t))&&"[object String]"===Object.prototype.toString.call(t)},a=function(t){function o(t){var e=1<arguments.length&&void 0!==arguments[1]?arguments[1]:{};!function(t,e){if(!(t instanceof e))throw new TypeError("Cannot call a class as a function")}(this,o);var n=function(t,e){if(!t)throw new ReferenceError("this hasn't been initialised - super() hasn't been called");return!e||"object"!=typeof e&&"function"!=typeof e?t:e}(this,(o.__proto__||Object.getPrototypeOf(o)).call(this));if(n.element="string"==typeof t?document.querySelector(t):t,!n.element)throw new Error("An invalid selector or non-DOM node has been provided.");return n._clickEvents=["click"],n.options=i({},e),n.onSliderInput=n.onSliderInput.bind(n),n.init(),n}return function(t,e){if("function"!=typeof e&&null!==e)throw new TypeError("Super expression must either be null or a function, not "+typeof e);t.prototype=Object.create(e&&e.prototype,{constructor:{value:t,enumerable:!1,writable:!0,configurable:!0}}),e&&(Object.setPrototypeOf?Object.setPrototypeOf(t,e):t.__proto__=e)}(o,r["a"]),u(o,[{key:"init",value:function(){if(this._id="bulmaSlider"+(new Date).getTime()+Math.floor(Math.random()*Math.floor(9999)),this.output=this._findOutputForSlider(),this._bindEvents(),this.output&&this.element.classList.contains("has-output-tooltip")){var t=this._getSliderOutputPosition();this.output.style.left=t.position}this.emit("bulmaslider:ready",this.element.value)}},{key:"_findOutputForSlider",value:function(){var e=this,n=null,t=document.getElementsByTagName("output")||[];return Array.from(t).forEach(function(t){if(t.htmlFor==e.element.getAttribute("id"))return n=t,!0}),n}},{key:"_getSliderOutputPosition",value:function(){var t,e=window.getComputedStyle(this.element,null),n=parseInt(e.getPropertyValue("width"),10);t=this.element.getAttribute("min")?this.element.getAttribute("min"):0;var 
r=(this.element.value-t)/(this.element.getAttribute("max")-t);return{position:(r<0?0:1<r?n:n*r)+"px"}}},{key:"_bindEvents",value:function(){this.output&&this.element.addEventListener("input",this.onSliderInput,!1)}},{key:"onSliderInput",value:function(t){if(t.preventDefault(),this.element.classList.contains("has-output-tooltip")){var e=this._getSliderOutputPosition();this.output.style.left=e.position}var n=this.output.hasAttribute("data-prefix")?this.output.getAttribute("data-prefix"):"",r=this.output.hasAttribute("data-postfix")?this.output.getAttribute("data-postfix"):"";this.output.value=n+this.element.value+r,this.emit("bulmaslider:ready",this.element.value)}}],[{key:"attach",value:function(){var n=this,t=0<arguments.length&&void 0!==arguments[0]?arguments[0]:'input[type="range"].slider',r=1<arguments.length&&void 0!==arguments[1]?arguments[1]:{},i=new Array;return(l(t)?document.querySelectorAll(t):Array.isArray(t)?t:[t]).forEach(function(t){if(void 0===t[n.constructor.name]){var e=new o(t,r);t[n.constructor.name]=e,i.push(e)}else i.push(t[n.constructor.name])}),i}}]),o}();e.default=a},function(t,e,n){"use strict";var r=function(){function r(t,e){for(var n=0;n<e.length;n++){var r=e[n];r.enumerable=r.enumerable||!1,r.configurable=!0,"value"in r&&(r.writable=!0),Object.defineProperty(t,r.key,r)}}return function(t,e,n){return e&&r(t.prototype,e),n&&r(t,n),t}}();var i=function(){function e(){var t=0<arguments.length&&void 0!==arguments[0]?arguments[0]:[];!function(t,e){if(!(t instanceof e))throw new TypeError("Cannot call a class as a function")}(this,e),this._listeners=new Map(t),this._middlewares=new Map}return r(e,[{key:"listenerCount",value:function(t){return this._listeners.has(t)?this._listeners.get(t).length:0}},{key:"removeListeners",value:function(){var e=this,t=0<arguments.length&&void 0!==arguments[0]?arguments[0]:null,n=1<arguments.length&&void 0!==arguments[1]&&arguments[1];null!==t?Array.isArray(t)?name.forEach(function(t){return e.removeListeners(t,n)}):(this._listeners.delete(t),n&&this.removeMiddleware(t)):this._listeners=new Map}},{key:"middleware",value:function(t,e){var n=this;Array.isArray(t)?name.forEach(function(t){return n.middleware(t,e)}):(Array.isArray(this._middlewares.get(t))||this._middlewares.set(t,[]),this._middlewares.get(t).push(e))}},{key:"removeMiddleware",value:function(){var e=this,t=0<arguments.length&&void 0!==arguments[0]?arguments[0]:null;null!==t?Array.isArray(t)?name.forEach(function(t){return e.removeMiddleware(t)}):this._middlewares.delete(t):this._middlewares=new Map}},{key:"on",value:function(t,e){var n=this,r=2<arguments.length&&void 0!==arguments[2]&&arguments[2];if(Array.isArray(t))t.forEach(function(t){return n.on(t,e)});else{var i=(t=t.toString()).split(/,|, | /);1<i.length?i.forEach(function(t){return n.on(t,e)}):(Array.isArray(this._listeners.get(t))||this._listeners.set(t,[]),this._listeners.get(t).push({once:r,callback:e}))}}},{key:"once",value:function(t,e){this.on(t,e,!0)}},{key:"emit",value:function(n,r){var i=this,o=2<arguments.length&&void 0!==arguments[2]&&arguments[2];n=n.toString();var u=this._listeners.get(n),l=null,a=0,s=o;if(Array.isArray(u))for(u.forEach(function(t,e){o||(l=i._middlewares.get(n),Array.isArray(l)?(l.forEach(function(t){t(r,function(){var t=0<arguments.length&&void 0!==arguments[0]?arguments[0]:null;null!==t&&(r=t),a++},n)}),a>=l.length&&(s=!0)):s=!0),s&&(t.once&&(u[e]=null),t.callback(r))});-1!==u.indexOf(null);)u.splice(u.indexOf(null),1)}}]),e}();e.a=i}]).default});
docs/static/js/fontawesome.all.min.js ADDED
The diff for this file is too large to render. See raw diff
 
docs/static/js/index.js ADDED
@@ -0,0 +1,78 @@
window.HELP_IMPROVE_VIDEOJS = false;

var INTERP_BASE = "./static/interpolation/stacked";
var NUM_INTERP_FRAMES = 240;

var interp_images = [];
function preloadInterpolationImages() {
  for (var i = 0; i < NUM_INTERP_FRAMES; i++) {
    var path = INTERP_BASE + '/' + String(i).padStart(6, '0') + '.jpg';
    interp_images[i] = new Image();
    interp_images[i].src = path;
  }
}

function setInterpolationImage(i) {
  var image = interp_images[i];
  image.ondragstart = function() { return false; };
  image.oncontextmenu = function() { return false; };
  $('#interpolation-image-wrapper').empty().append(image);
}


$(document).ready(function() {
  // Check for click events on the navbar burger icon
  $(".navbar-burger").click(function() {
    // Toggle the "is-active" class on both the "navbar-burger" and the "navbar-menu"
    $(".navbar-burger").toggleClass("is-active");
    $(".navbar-menu").toggleClass("is-active");
  });

  var options = {
    slidesToScroll: 1,
    slidesToShow: 3,
    loop: true,
    infinite: true,
    autoplay: false,
    autoplaySpeed: 3000,
  }

  // Initialize all div with carousel class
  var carousels = bulmaCarousel.attach('.carousel', options);

  // Loop on each carousel initialized
  for(var i = 0; i < carousels.length; i++) {
    // Add listener to event
    carousels[i].on('before:show', state => {
      console.log(state);
    });
  }

  // Access to bulmaCarousel instance of an element
  var element = document.querySelector('#my-element');
  if (element && element.bulmaCarousel) {
    // bulmaCarousel instance is available as element.bulmaCarousel
    element.bulmaCarousel.on('before-show', function(state) {
      console.log(state);
    });
  }

  /*var player = document.getElementById('interpolation-video');
  player.addEventListener('loadedmetadata', function() {
    $('#interpolation-slider').on('input', function(event) {
      console.log(this.value, player.duration);
      player.currentTime = player.duration / 100 * this.value;
    })
  }, false);*/
  preloadInterpolationImages();

  $('#interpolation-slider').on('input', function(event) {
    setInterpolationImage(this.value);
  });
  setInterpolationImage(0);
  $('#interpolation-slider').prop('max', NUM_INTERP_FRAMES - 1);

  bulmaSlider.attach();

})
env.yml ADDED
@@ -0,0 +1,20 @@
name: calligrapher
channels:
  - pytorch
  - conda-forge
  - defaults
dependencies:
  - python=3.10.15
  - pytorch==2.5.0
  - torchvision==0.20.0
  - numpy=1.26.4
  - pillow=11.0.0
  - protobuf=5.28.3
  - sentencepiece=0.2.0
  - pip
  - pip:
      - diffusers==0.33.0
      - transformers==4.49.0
      - opencv-python==4.11.0.86
      - accelerate==0.30.1
      - gradio==5.23.3
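
Assuming a standard conda installation, this environment can be created and activated with "conda env create -f env.yml" followed by "conda activate calligrapher".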
gradio_demo.py ADDED
@@ -0,0 +1,208 @@
"""
Gradio demo for text customization with Calligrapher (the reference is uploaded by the user).
"""

import os
import json
import gradio as gr
import numpy as np
from datetime import datetime
import torch
from PIL import Image

from pipeline_calligrapher import CalligrapherPipeline
from models.calligrapher import Calligrapher
from models.transformer_flux_inpainting import FluxTransformer2DModel
from utils import process_gradio_source, get_bbox_from_mask, crop_image_from_bb, \
    resize_img_and_pad, generate_context_reference_image

# Global settings.
with open(os.path.join(os.path.dirname(__file__), 'path_dict.json'), 'r') as f:
    path_dict = json.load(f)
SAVE_DIR = path_dict['gradio_save_dir']
os.environ["GRADIO_TEMP_DIR"] = path_dict['gradio_temp_dir']
os.environ['TMPDIR'] = path_dict['gradio_temp_dir']


# Function for loading the pre-trained models.
def load_models():
    base_model_path = path_dict['base_model_path']
    image_encoder_path = path_dict['image_encoder_path']
    calligrapher_path = path_dict['calligrapher_path']
    transformer = FluxTransformer2DModel.from_pretrained(base_model_path, subfolder="transformer",
                                                         torch_dtype=torch.bfloat16)
    pipe = CalligrapherPipeline.from_pretrained(base_model_path, transformer=transformer,
                                                torch_dtype=torch.bfloat16).to("cuda")
    model = Calligrapher(pipe, image_encoder_path, calligrapher_path, device="cuda", num_tokens=128)
    return model


# Init models.
model = load_models()
print('Model loaded!')


def process_and_generate(editor_component, reference_image, prompt, height, width,
                         scale, steps=50, seed=42, use_context=True, num_images=1):
    print('Begin processing!')
    # Job directory.
    job_name = datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
    job_dir = os.path.join(SAVE_DIR, job_name)
    os.makedirs(job_dir, exist_ok=True)

    # Get source, mask, and cropped images from gr.ImageEditor.
    source_image, mask_image, cropped_image = process_gradio_source(editor_component)
    source_image.save(os.path.join(job_dir, 'source_image.png'))
    mask_image.save(os.path.join(job_dir, 'mask_image.png'))
    cropped_image.save(os.path.join(job_dir, 'cropped_image.png'))

    # Resize source and mask, then binarize the mask.
    source_image = source_image.resize((width, height))
    mask_image = mask_image.resize((width, height), Image.NEAREST)
    mask_np = np.array(mask_image)
    mask_np[mask_np > 0] = 255
    mask_image = Image.fromarray(mask_np.astype(np.uint8))

    if reference_image is None:
        # If self-inpainting (no input ref): (1) get the bounding box from the mask and (2) crop it out as the ref image.
        tl, br = get_bbox_from_mask(mask_image)
        # Convert irregularly shaped masks into rectangles.
        reference_image = crop_image_from_bb(source_image, tl, br)
        # Raw reference image before resizing.
        reference_image.save(os.path.join(job_dir, 'reference_image_raw.png'))
    reference_image_to_encoder = resize_img_and_pad(reference_image, target_size=(512, 512))
    reference_image_to_encoder.save(os.path.join(job_dir, 'reference_to_encoder.png'))
    reference_context = generate_context_reference_image(reference_image, width)

    if use_context:
        # Concat the context on top of the input masked image in pixel space.
        source_with_context = Image.new(source_image.mode, (width, reference_context.size[1] + height))
        source_with_context.paste(reference_context, (0, 0))
        source_with_context.paste(source_image, (0, reference_context.size[1]))
        # Concat a zero mask on top of the mask image (canvas height = context height + mask height).
        mask_with_context = Image.new(mask_image.mode,
                                      (mask_image.size[0], reference_context.size[1] + mask_image.size[1]),
                                      color=0)
        mask_with_context.paste(mask_image, (0, reference_context.size[1]))

        source_image = source_with_context
        mask_image = mask_with_context

    all_generated_images = []
    for i in range(num_images):
        res = model.generate(
            image=source_image,
            mask_image=mask_image,
            ref_image=reference_image_to_encoder,
            prompt=prompt,
            scale=scale,
            num_inference_steps=steps,
            width=source_image.size[0],
            height=source_image.size[1],
            seed=seed + i,
        )[0]
        if use_context:
            res_vis = res.crop((0, reference_context.size[1], res.width, res.height))  # Remove the context strip.
            mask_vis = mask_image.crop(
                (0, reference_context.size[1], mask_image.width, mask_image.height))  # Remove the context mask.
        else:
            res_vis = res
            mask_vis = mask_image
        res_vis.save(os.path.join(job_dir, f'result_{i}.png'))
        all_generated_images.append((res_vis, f"Generated #{i + 1} (Seed: {seed + i})"))

    return mask_vis, reference_image_to_encoder, all_generated_images


# Main Gradio app.
with gr.Blocks(theme="default", css=".image-editor img {max-width: 70%; height: 70%;}") as demo:
    gr.Markdown(
        """
# 🖌️ Calligrapher: Freestyle Text Image Customization
        """
    )

    with gr.Row():
        with gr.Column(scale=3):
            gr.Markdown("### 🎨 Image Editing Panel")
            editor_component = gr.ImageEditor(
                label="Upload or Draw",
                type="pil",
                brush=gr.Brush(colors=["#FFFFFF"], default_size=30, color_mode="fixed"),
                layers=True,
                interactive=True,
            )

            gr.Markdown("### 📤 Output Result")
            gallery = gr.Gallery(label="🖼️ Result Gallery")
            gr.Markdown(
                """<br>

### ✨ User Tips:

1. **Speed vs. Quality Trade-off.** Use fewer steps (e.g., 10 steps, which takes ~4s/image on a single A6000 GPU) for faster generation, though quality may drop.

2. **Inpainting Position Freedom.** Inpainting positions are flexible - they do not need to match the original text locations in the input image.

3. **Iterative Editing.** Drag outputs from the gallery to the Image Editing Panel (clear the panel first) for quick refinements.

4. **Mask Optimization.** Adjust the mask size/aspect ratio to match your desired content. The model tends to fill the mask and harmonizes the generation with the background in terms of color and lighting.

5. **Reference Image Tip.** White-background references improve style consistency - the encoder also considers the background context of the given reference image.

6. **Resolution Balance.** Very high-resolution generation sometimes triggers spelling errors; 512/768px is recommended since the model was trained at a resolution of 512.
                """
            )
        with gr.Column(scale=1):
            gr.Markdown("### ⚙️ Settings")
            reference_image = gr.Image(
                label="🧩 Reference Image (skip this if self-referencing)",
                sources=["upload"],
                type="pil",
            )
            prompt = gr.Textbox(
                label="📝 Prompt",
                placeholder="The text is 'Image'...",
                value="The text is 'Image'."
            )

            with gr.Accordion("🔧 Additional Settings", open=True):
                with gr.Row():
                    height = gr.Number(label="Height", value=512, precision=0)
                    width = gr.Number(label="Width", value=512, precision=0)
                scale = gr.Slider(0.0, 2.0, value=1.0, step=0.1, label="🎚️ Strength")
                steps = gr.Slider(1, 100, 50, step=1, label="🔁 Steps")
                with gr.Row():
                    seed = gr.Number(label="🎲 Seed", value=56, precision=0)
                    use_context = gr.Checkbox(value=True, label="🔍 Use Context", interactive=True)
                num_images = gr.Slider(1, 16, 2, step=1, label="🖼️ Sample Amount")

            run_btn = gr.Button("🚀 Run", variant="primary")

            mask_output = gr.Image(label="🟩 Mask Demo")
            reference_demo = gr.Image(label="🧩 Reference Demo")

    # Run button event.
    run_btn.click(
        fn=process_and_generate,
        inputs=[
            editor_component,
            reference_image,
            prompt,
            height,
            width,
            scale,
            steps,
            seed,
            use_context,
            num_images
        ],
        outputs=[
            mask_output,
            reference_demo,
            gallery
        ]
    )

if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=1234, share=False)
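
For scripting the same flow without the UI, below is a minimal headless sketch of the call sequence the demo performs; mask binarization and the context-strip concatenation done under use_context are omitted for brevity. The checkpoint paths and image filenames are placeholders, not values shipped with this upload, and model.generate is called with the same arguments as in process_and_generate above.

import torch
from PIL import Image
from diffusers.utils import load_image

from pipeline_calligrapher import CalligrapherPipeline
from models.calligrapher import Calligrapher
from models.transformer_flux_inpainting import FluxTransformer2DModel
from utils import resize_img_and_pad

# Placeholder paths: point these at the entries of your local path_dict.json.
transformer = FluxTransformer2DModel.from_pretrained(
    "path/to/base_model", subfolder="transformer", torch_dtype=torch.bfloat16)
pipe = CalligrapherPipeline.from_pretrained(
    "path/to/base_model", transformer=transformer, torch_dtype=torch.bfloat16).to("cuda")
model = Calligrapher(pipe, "path/to/image_encoder", "path/to/calligrapher",
                     device="cuda", num_tokens=128)

source = load_image("source.png").resize((512, 512))               # Image to edit.
mask = load_image("mask.png").resize((512, 512), Image.NEAREST)    # White = region to inpaint.
ref = resize_img_and_pad(load_image("ref.png"), target_size=(512, 512))  # Style reference.

result = model.generate(image=source, mask_image=mask, ref_image=ref,
                        prompt="The text is 'Image'.", scale=1.0,
                        num_inference_steps=50, width=512, height=512, seed=42)[0]
result.save("result.png")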
gradio_demo_multilingual.py ADDED
@@ -0,0 +1,194 @@
"""
Gradio demo for text customization with Calligrapher (the reference is uploaded by the user),
which supports multilingual text image customization.
Acknowledgement: supported by TextFLUX: https://github.com/yyyyyxie/textflux.
"""

import os
import json
import gradio as gr
import numpy as np
from datetime import datetime
import torch
from PIL import Image

from pipeline_calligrapher import CalligrapherPipeline
from models.calligrapher import Calligrapher
from models.transformer_flux_inpainting import FluxTransformer2DModel
from utils import process_gradio_source, get_bbox_from_mask, crop_image_from_bb, resize_img_and_pad
from utils_multilingual import run_multilingual_inference

# Global settings.
with open(os.path.join(os.path.dirname(__file__), 'path_dict.json'), 'r') as f:
    path_dict = json.load(f)
SAVE_DIR = path_dict['gradio_save_dir']
os.environ["GRADIO_TEMP_DIR"] = path_dict['gradio_temp_dir']
os.environ['TMPDIR'] = path_dict['gradio_temp_dir']


# Function for loading the pre-trained models.
def load_models():
    base_model_path = path_dict['base_model_path']
    image_encoder_path = path_dict['image_encoder_path']
    calligrapher_path = path_dict['calligrapher_path']
    textflux_path = path_dict['textflux_path']
    transformer = FluxTransformer2DModel.from_pretrained(base_model_path, subfolder="transformer",
                                                         torch_dtype=torch.bfloat16)
    # Load the TextFLUX LoRA weights.
    state_dict, network_alphas = CalligrapherPipeline.lora_state_dict(
        pretrained_model_name_or_path_or_dict=textflux_path,
        return_alphas=True
    )
    is_correct_format = all("lora" in key or "dora_scale" in key for key in state_dict.keys())
    if not is_correct_format:
        raise ValueError("Invalid LoRA checkpoint!")
    CalligrapherPipeline.load_lora_into_transformer(
        state_dict=state_dict,
        network_alphas=network_alphas,
        transformer=transformer,
    )
    pipe = CalligrapherPipeline.from_pretrained(base_model_path, transformer=transformer,
                                                torch_dtype=torch.bfloat16).to("cuda")
    model = Calligrapher(pipe, image_encoder_path, calligrapher_path, device="cuda", num_tokens=128)
    return model


# Init models.
model = load_models()
print('Model loaded!')


def process_and_generate(editor_component, reference_image, prompt, height, width,
                         scale, steps=50, seed=42, num_images=1):
    print('Begin processing!')
    # Job directory.
    job_name = datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
    job_dir = os.path.join(SAVE_DIR, job_name)
    os.makedirs(job_dir, exist_ok=True)

    # Get source, mask, and cropped images from gr.ImageEditor.
    source_image, mask_image, cropped_image = process_gradio_source(editor_component)
    source_image.save(os.path.join(job_dir, 'source_image.png'))
    mask_image.save(os.path.join(job_dir, 'mask_image.png'))
    cropped_image.save(os.path.join(job_dir, 'cropped_image.png'))

    # Resize source and mask, then binarize the mask.
    source_image = source_image.resize((width, height))
    mask_image = mask_image.resize((width, height), Image.NEAREST)
    mask_np = np.array(mask_image)
    mask_np[mask_np > 0] = 255
    mask_image = Image.fromarray(mask_np.astype(np.uint8))

    if reference_image is None:
        # If self-inpainting (no input ref): (1) get the bounding box from the mask and (2) crop it out as the ref image.
        tl, br = get_bbox_from_mask(mask_image)
        # Convert irregularly shaped masks into rectangles.
        reference_image = crop_image_from_bb(source_image, tl, br)
        # Raw reference image before resizing.
        reference_image.save(os.path.join(job_dir, 'reference_image_raw.png'))
    reference_image_to_encoder = resize_img_and_pad(reference_image, target_size=(512, 512))
    reference_image_to_encoder.save(os.path.join(job_dir, 'reference_to_encoder.png'))

    all_generated_images = run_multilingual_inference(model, source_image, mask_image, reference_image_to_encoder,
                                                      prompt, num_steps=steps, seed=seed, num_images=num_images)
    vis_all_generated_images = []
    for i in range(len(all_generated_images)):
        res = all_generated_images[i]
        res_vis = res.crop((source_image.width, 0, res.width, res.height))
        mask_vis = mask_image
        res_vis.save(os.path.join(job_dir, f'result_{i}.png'))
        vis_all_generated_images.append((res_vis, f"Generated #{i + 1} (Seed: {seed + i})"))
    return mask_vis, reference_image_to_encoder, vis_all_generated_images


# Main Gradio app.
with gr.Blocks(theme="default", css=".image-editor img {max-width: 70%; height: 70%;}") as demo:
    gr.Markdown(
        """
# 🖌️ Calligrapher: Freestyle Text Image Customization (Multilingual)
        """
    )

    with gr.Row():
        with gr.Column(scale=3):
            gr.Markdown("### 🎨 Image Editing Panel")
            editor_component = gr.ImageEditor(
                label="Upload or Draw",
                type="pil",
                brush=gr.Brush(colors=["#FFFFFF"], default_size=30, color_mode="fixed"),
                layers=True,
                interactive=True,
            )

            gr.Markdown("### 📤 Output Result")
            gallery = gr.Gallery(label="🖼️ Result Gallery")
            gr.Markdown(
                """<br>

### ✨ User Tips:

1. **Quality of Multilingual Generation.** This implementation combines Calligrapher with the fine-tuned base model (TextFLUX) without additional fine-tuning, so please temper expectations regarding output quality.

2. **Speed vs. Quality Trade-off.** Use fewer steps (e.g., 10 steps, which takes ~4s/image on a single A6000 GPU) for faster generation, though quality may drop.

3. **Inpainting Position Freedom.** Inpainting positions are flexible - they do not need to match the original text locations in the input image.

4. **Iterative Editing.** Drag outputs from the gallery to the Image Editing Panel (clear the panel first) for quick refinements.

5. **Mask Optimization.** Adjust the mask size/aspect ratio to match your desired content. The model tends to fill the mask and harmonizes the generation with the background in terms of color and lighting.

6. **Reference Image Tip.** White-background references improve style consistency - the encoder also considers the background context of the given reference image.

7. **Resolution Balance.** Very high-resolution generation sometimes triggers spelling errors; 512/768px is recommended since the model was trained at a resolution of 512.
                """
            )
        with gr.Column(scale=1):
            gr.Markdown("### ⚙️ Settings")
            reference_image = gr.Image(
                label="🧩 Reference Image (skip this if self-reference)",
                sources=["upload"],
                type="pil",
            )
            prompt = gr.Textbox(
                label="📝 Prompt",
                placeholder="你好",
                value="你好"
            )

            with gr.Accordion("🔧 Additional Settings", open=True):
                with gr.Row():
                    height = gr.Number(label="Height", value=512, precision=0)
                    width = gr.Number(label="Width", value=512, precision=0)
                scale = gr.Slider(0.0, 2.0, value=1.0, step=0.1, label="🎚️ Strength")
                steps = gr.Slider(1, 100, 30, step=1, label="🔁 Steps")
                with gr.Row():
                    seed = gr.Number(label="🎲 Seed", value=56, precision=0)
                    num_images = gr.Slider(1, 16, 2, step=1, label="🖼️ Sample Amount")

            run_btn = gr.Button("🚀 Run", variant="primary")

            mask_output = gr.Image(label="🟩 Mask Demo")
            reference_demo = gr.Image(label="🧩 Reference Demo")

    # Run button event.
    run_btn.click(
        fn=process_and_generate,
        inputs=[
            editor_component,
            reference_image,
            prompt,
            height,
            width,
            scale,
            steps,
            seed,
            num_images
        ],
        outputs=[
            mask_output,
            reference_demo,
            gallery
        ]
    )

if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=1234, share=False)
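
A note on the output layout: judging from the crop in process_and_generate (res.crop((source_image.width, 0, res.width, res.height))), run_multilingual_inference appears to return a TextFLUX-style side-by-side canvas with an auxiliary glyph panel on the left and the edited scene on the right, and the demo keeps only the scene half. A minimal sketch of that cropping convention, assuming a canvas twice the source width:

from PIL import Image

canvas = Image.new("RGB", (1024, 512))                      # [glyph panel | edited scene], 512 px each.
scene = canvas.crop((512, 0, canvas.width, canvas.height))  # Keep only the right (scene) half.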
gradio_demo_upload_mask.py ADDED
@@ -0,0 +1,279 @@
"""
Gradio demo for text customization with Calligrapher (the reference AND MASK are uploaded by the user).
This demo is useful for reproduction.
"""

import os
import json
import gradio as gr
import numpy as np
from datetime import datetime
import torch
from PIL import Image

from pipeline_calligrapher import CalligrapherPipeline
from models.calligrapher import Calligrapher
from models.transformer_flux_inpainting import FluxTransformer2DModel
from utils import get_bbox_from_mask, crop_image_from_bb, \
    resize_img_and_pad, generate_context_reference_image

# Global settings.
with open(os.path.join(os.path.dirname(__file__), 'path_dict.json'), 'r') as f:
    path_dict = json.load(f)
SAVE_DIR = path_dict['gradio_save_dir']
os.environ["GRADIO_TEMP_DIR"] = path_dict['gradio_temp_dir']
os.environ['TMPDIR'] = path_dict['gradio_temp_dir']


# Function for loading the pre-trained models.
def load_models():
    base_model_path = path_dict['base_model_path']
    image_encoder_path = path_dict['image_encoder_path']
    calligrapher_path = path_dict['calligrapher_path']
    transformer = FluxTransformer2DModel.from_pretrained(base_model_path, subfolder="transformer",
                                                         torch_dtype=torch.bfloat16)
    pipe = CalligrapherPipeline.from_pretrained(base_model_path, transformer=transformer,
                                                torch_dtype=torch.bfloat16).to("cuda")
    model = Calligrapher(pipe, image_encoder_path, calligrapher_path, device="cuda", num_tokens=128)
    return model


# Init models.
model = load_models()
print('Model loaded!')


def process_and_generate(source_image, mask_image, reference_image, prompt, height, width,
                         scale, steps=50, seed=42, use_context=True, num_images=1):
    print('Begin processing!')
    # Job directory.
    job_name = datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
    job_dir = os.path.join(SAVE_DIR, job_name)
    os.makedirs(job_dir, exist_ok=True)

    # Save the uploaded source and mask images.
    source_image.save(os.path.join(job_dir, 'source_image.png'))
    mask_image.save(os.path.join(job_dir, 'mask_image.png'))

    # Resize source and mask, then binarize the mask.
    source_image = source_image.resize((width, height))
    mask_image = mask_image.resize((width, height), Image.NEAREST)
    mask_np = np.array(mask_image)
    mask_np[mask_np > 0] = 255
    mask_image = Image.fromarray(mask_np.astype(np.uint8))

    if reference_image is None:
        # If self-inpainting (no input ref): (1) get the bounding box from the mask and (2) crop it out as the ref image.
        tl, br = get_bbox_from_mask(mask_image)
        # Convert irregularly shaped masks into rectangles.
        reference_image = crop_image_from_bb(source_image, tl, br)
        # Raw reference image before resizing.
        reference_image.save(os.path.join(job_dir, 'reference_image_raw.png'))
    reference_image_to_encoder = resize_img_and_pad(reference_image, target_size=(512, 512))  # 512 to match the SigLIP input size.
    reference_image_to_encoder.save(os.path.join(job_dir, 'reference_to_encoder.png'))
    reference_context = generate_context_reference_image(reference_image, width)

    if use_context:
        # Concat the context on top of the input masked image in pixel space.
        source_with_context = Image.new(source_image.mode, (width, reference_context.size[1] + height))
        source_with_context.paste(reference_context, (0, 0))
        source_with_context.paste(source_image, (0, reference_context.size[1]))
        source_with_context.save(os.path.join(job_dir, 'source_with_context.png'))
        # Concat a zero mask on top of the mask image (canvas height = context height + mask height).
        mask_with_context = Image.new(mask_image.mode,
                                      (mask_image.size[0], reference_context.size[1] + mask_image.size[1]),
                                      color=0)
        mask_with_context.paste(mask_image, (0, reference_context.size[1]))

        source_image = source_with_context
        mask_image = mask_with_context

    all_generated_images = []
    for i in range(num_images):
        res = model.generate(
            image=source_image,
            mask_image=mask_image,
            ref_image=reference_image_to_encoder,
            prompt=prompt,
            scale=scale,
            num_inference_steps=steps,
            width=source_image.size[0],
            height=source_image.size[1],
            seed=seed + i,
        )[0]
        if use_context:
            res_vis = res.crop((0, reference_context.size[1], res.width, res.height))  # Remove the context strip.
            mask_vis = mask_image.crop(
                (0, reference_context.size[1], mask_image.width, mask_image.height))  # Remove the context mask.
        else:
            res_vis = res
            mask_vis = mask_image
        res_vis.save(os.path.join(job_dir, f'result_{i}.png'))
        all_generated_images.append((res_vis, f"Generated #{i + 1} (Seed: {seed + i})"))

    return mask_vis, reference_image_to_encoder, all_generated_images


# Construct example data.
sample_data = [
    {
        "source": Image.open("samples/test11_source.png"),
        "mask": Image.open("samples/test11_mask.png"),
        "reference": Image.open("samples/test11_ref.png"),
        "prompt": "The text is 'Rose'."
    },
    {
        "source": Image.open("samples/test17_source.png"),
        "mask": Image.open("samples/test17_mask.png"),
        "reference": Image.open("samples/rainbow.jpg"),
        "prompt": "The text is 'Rainbow'."
    },
    {
        "source": Image.open("samples/test17_source.png"),
        "mask": Image.open("samples/test17_mask.png"),
        "reference": Image.open("samples/fire.jpg"),
        "prompt": "The text is 'Fire!'."
    }
]

examples = [
    [
        sample["source"],
        sample["mask"],
        sample["reference"],
        sample["prompt"],
        512,   # height
        512,   # width
        1.0,   # scale
        10,    # steps
        2025,  # seed
        True,  # use_context
        2      # num_images
    ]
    for sample in sample_data
]

# Main Gradio app.
with gr.Blocks(theme="default", css=".image-editor img {max-width: 70%; height: 70%;}") as demo:
    gr.Markdown(
        """
# 🖌️ Calligrapher: Freestyle Text Image Customization
        """
    )

    with gr.Row():
        with gr.Column(scale=3):
            gr.Markdown("### 🎨 Image Editing Panel")
            source_image = gr.Image(
                label="Source Image",
                sources=["upload"],
                type="pil",
                value=Image.open("samples/test50_source.png"),
            )

            gr.Markdown("### 📤 Output Result")
            gallery = gr.Gallery(label="🖼️ Result Gallery")
            gr.Markdown(
                """<br>

### ✨ User Tips:

1. **Speed vs. Quality Trade-off.** Use fewer steps (e.g., 10 steps, which takes ~4s/image on a single A6000 GPU) for faster generation, though quality may drop.

2. **Inpainting Position Freedom.** Inpainting positions are flexible - they do not need to match the original text locations in the input image.

3. **Iterative Editing.** Drag outputs from the gallery to the Image Editing Panel (clear the panel first) for quick refinements.

4. **Mask Optimization.** Adjust the mask size/aspect ratio to match your desired content. The model tends to fill the mask and harmonizes the generation with the background in terms of color and lighting.

5. **Reference Image Tip.** White-background references improve style consistency - the encoder also considers the background context of the given reference image.

6. **Resolution Balance.** Very high-resolution generation sometimes triggers spelling errors; 512/768px is recommended since the model was trained at a resolution of 512.
                """
            )
        with gr.Column(scale=1):
            gr.Markdown("### ⚙️ Settings")
            mask_image = gr.Image(
                label="🧩 Mask Image",
                sources=["upload"],
                type="pil",
                value=Image.open("samples/test50_mask.png"),
            )
            reference_image = gr.Image(
                label="🧩 Reference Image",
                sources=["upload"],
                type="pil",
                value=Image.open("samples/test50_ref.png")
            )
            prompt = gr.Textbox(
                label="📝 Prompt",
                placeholder="The text is 'Image'...",
                value="The text is 'Balloon'."
            )

            with gr.Accordion("🔧 Additional Settings", open=True):
                with gr.Row():
                    height = gr.Number(label="Height", value=512, precision=0)
                    width = gr.Number(label="Width", value=512, precision=0)
                scale = gr.Slider(0.0, 2.0, value=1.0, step=0.1, label="🎚️ Strength")
                steps = gr.Slider(1, 100, 50, step=1, label="🔁 Steps")
                with gr.Row():
                    seed = gr.Number(label="🎲 Seed", value=56, precision=0)
                    use_context = gr.Checkbox(value=True, label="🔍 Use Context", interactive=True)
                num_images = gr.Slider(1, 16, 2, step=1, label="🖼️ Sample Amount")

            run_btn = gr.Button("🚀 Run", variant="primary")

            mask_output = gr.Image(label="🟩 Mask Demo")
            reference_demo = gr.Image(label="🧩 Reference Demo")

    # Run button event.
    run_btn.click(
        fn=process_and_generate,
        inputs=[
            source_image,
            mask_image,
            reference_image,
            prompt,
            height,
            width,
            scale,
            steps,
            seed,
            use_context,
            num_images
        ],
        outputs=[
            mask_output,
            reference_demo,
            gallery
        ]
    )

    gr.Examples(
        examples=examples,
        inputs=[
            source_image,
            mask_image,
            reference_image,
            prompt,
            height,
            width,
            scale,
            steps,
            seed,
            use_context,
            num_images
        ],
        outputs=[
            mask_output,
            reference_demo,
            gallery
        ],
        fn=process_and_generate,
        label="✨ Example Inputs: Click any example below to load it.",
        examples_per_page=3
    )

if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=1234, share=False)
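
The self-reference branch above depends on get_bbox_from_mask and crop_image_from_bb from utils, whose implementations are not part of this excerpt. Below is a hypothetical numpy/PIL sketch of what such helpers could look like, with corners returned as (x, y) pairs; this is an illustrative stand-in, not the shipped code.

import numpy as np

def get_bbox_from_mask(mask_image):
    # Top-left and bottom-right corners of the nonzero (white) mask region.
    ys, xs = np.nonzero(np.array(mask_image.convert("L")))
    return (int(xs.min()), int(ys.min())), (int(xs.max()), int(ys.max()))

def crop_image_from_bb(image, tl, br):
    # Crop the axis-aligned rectangle spanned by the bounding box from a PIL image.
    return image.crop((tl[0], tl[1], br[0] + 1, br[1] + 1))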
infer_calligrapher_cross_custom.py ADDED
@@ -0,0 +1,153 @@
+ """
+ Script for scaled cross-reference customization inference with Calligrapher.
+ """
+ 
+ import os
+ import json
+ import random
+ from PIL import Image
+ import numpy as np
+ from datetime import datetime
+ 
+ import torch
+ from diffusers.utils import load_image
+ 
+ from pipeline_calligrapher import CalligrapherPipeline
+ from models.calligrapher import Calligrapher
+ from models.transformer_flux_inpainting import FluxTransformer2DModel
+ 
+ from utils import resize_img_and_pad, generate_context_reference_image
+ 
+ 
+ def infer_calligrapher(test_image_dir, result_save_dir,
+                        target_h=512, target_w=512,
+                        gen_num_per_case=2):
+     # Set job dir.
+     job_name = datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
+     result_save_path = os.path.join(result_save_dir, job_name)
+     if not os.path.exists(result_save_path):
+         os.makedirs(result_save_path, exist_ok=True)
+ 
+     # Load models.
+     base_model_path = path_dict['base_model_path']
+     image_encoder_path = path_dict['image_encoder_path']
+     calligrapher_path = path_dict['calligrapher_path']
+     transformer = FluxTransformer2DModel.from_pretrained(
+         base_model_path, subfolder="transformer", torch_dtype=torch.bfloat16
+     )
+     pipe = CalligrapherPipeline.from_pretrained(base_model_path,
+                                                 transformer=transformer, torch_dtype=torch.bfloat16).to("cuda")
+     model = Calligrapher(pipe, image_encoder_path, calligrapher_path,
+                          device="cuda", num_tokens=128)
+ 
+     source_image_names = [i for i in os.listdir(test_image_dir) if 'source.png' in i]
+     # Load prompts from the bench txt file and print them.
+     info_dict = {}
+     with open(os.path.join(test_image_dir, 'cross_bench.txt'), 'r') as file:
+         for line in file:
+             line = line.strip()
+             if line:
+                 key, value = line.split('-', 1)
+                 info_dict[int(key)] = value
+     print(info_dict)
+     i = 0
+     print('Printing given prompts...')
+     for img_id in sorted(info_dict.keys()):
+         i += 1
+         info = info_dict[img_id]
+         print(f'Sample #{i}: {img_id} - {info}')
+ 
+     count = 0
+     for source_image_name in sorted(source_image_names):
+         count += 1
+         img_id = int(source_image_name.split("test")[1].split("_")[0])
+         if img_id not in info_dict.keys():
+             continue
+         info = info_dict[img_id]
+         ref_ids, text = info.split('-', 1)  # Split once so hyphens inside the text stay intact.
+         ref_ids = ref_ids.split(',')
+         prompt = f"The text is '{text}'."
+ 
+         source_image_path = os.path.join(test_image_dir, source_image_name)
+         mask_image_name = source_image_name.replace('source', 'mask')
+         mask_image_path = os.path.join(test_image_dir, mask_image_name)
+ 
+         for ref_id in ref_ids:
+             reference_image_name = source_image_name.replace('source', 'ref').replace(f'{img_id}', f'{ref_id}')
+             reference_image_path = os.path.join(test_image_dir, reference_image_name)
+ 
+             print('source_image_path: ', source_image_path)
+             print('mask_image_path: ', mask_image_path)
+             print('reference_image_path: ', reference_image_path)
+             print(f'prompt: {prompt}')
+ 
+             source_image = load_image(source_image_path)
+             mask_image = load_image(mask_image_path)
+             # Resize source and mask.
+             source_image = source_image.resize((target_w, target_h))
+             mask_image = mask_image.resize((target_w, target_h), Image.NEAREST)
+             mask_np = np.array(mask_image)
+             mask_np[mask_np > 0] = 255
+             mask_image = Image.fromarray(mask_np.astype(np.uint8))
+             source_img_w, source_img_h = source_image.size
+ 
+             # Resize the reference to fit the image encoder.
+             reference_image = Image.open(reference_image_path).convert("RGB")
+             reference_image_to_encoder = resize_img_and_pad(reference_image, target_size=[512, 512])
+ 
+             reference_context = generate_context_reference_image(reference_image, source_img_w)
+             # Concatenate the context image on top.
+             source_with_context = Image.new(source_image.mode, (source_img_w, reference_context.size[1] + source_img_h))
+             source_with_context.paste(reference_context, (0, 0))
+             source_with_context.paste(source_image, (0, reference_context.size[1]))
+             # Concatenate an all-zero mask on top of the mask image.
+             mask_with_context = Image.new(mask_image.mode,
+                                           (mask_image.size[0], reference_context.size[1] + mask_image.size[1]), color=0)
+             mask_with_context.paste(mask_image, (0, reference_context.size[1]))
+ 
+             # Identifiers in filename.
+             ref_id = reference_image_name.split('_')[0]
+             safe_prompt = prompt.replace(" ", "_").replace("'", "").replace(",", "").replace('"', '').replace('?', '')[:50]
+ 
+             for i in range(gen_num_per_case):
+                 seed = random.randint(0, 2 ** 32 - 1)
+                 images = model.generate(
+                     image=source_with_context,
+                     mask_image=mask_with_context,
+                     ref_image=reference_image_to_encoder,
+                     prompt=prompt,
+                     scale=1.0,
+                     num_inference_steps=50,
+                     width=source_with_context.size[0],
+                     height=source_with_context.size[1],
+                     seed=seed,
+                 )
+ 
+                 index = len(os.listdir(result_save_path))
+                 output_filename = f"result_{index}_{ref_id}_{safe_prompt}_{seed}.png"
+ 
+                 result_img = images[0]
+                 result_img_vis = result_img.crop((0, reference_context.size[1], result_img.width, result_img.height))
+                 result_img_vis.save(os.path.join(result_save_path, output_filename))
+ 
+                 target_size = (source_image.size[0], source_image.size[1])
+                 vis_img = Image.new('RGB', (source_image.size[0] * 3, source_image.size[1]))
+                 vis_img.paste(source_image.resize(target_size), (0, 0))
+                 vis_img.paste(reference_context.resize(target_size), (source_image.size[0], 0))
+                 vis_img.paste(result_img_vis.resize(target_size), (source_image.size[0] * 2, 0))
+                 vis_img_save_path = os.path.join(result_save_path, f'vis_{output_filename}'.replace('.png', '.jpg'))
+                 vis_img.save(vis_img_save_path)
+                 print(f"Generated images saved to {vis_img_save_path}.")
+ 
+ 
+ if __name__ == '__main__':
+     with open(os.path.join(os.path.dirname(__file__), 'path_dict.json'), 'r') as f:
+         path_dict = json.load(f)
+     # Set directory paths.
+     test_image_dir = path_dict['data_dir']
+     result_save_dir = path_dict['cli_save_dir']
+     infer_calligrapher(test_image_dir, result_save_dir,
+                        target_h=512, target_w=512,
+                        gen_num_per_case=2)
+     print('Finished!')
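
Each line of `cross_bench.txt` appears to follow a `<source id>-<comma-separated reference ids>-<text>` layout, inferred from the two-stage parsing above. A small round-trip sketch (the sample line itself is made up for illustration):

```py
line = "3-1,2-Balloon"  # hypothetical entry: source id 3, reference ids 1 and 2, text "Balloon"
key, value = line.split('-', 1)      # "3", "1,2-Balloon"
ref_ids, text = value.split('-', 1)  # "1,2", "Balloon"
print(int(key), ref_ids.split(','), f"The text is '{text}'.")
# 3 ['1', '2'] The text is 'Balloon'.
```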
infer_calligrapher_self_custom.py ADDED
@@ -0,0 +1,149 @@
+ """
+ Script for scaled self-reference customization inference with Calligrapher.
+ """
+ 
+ import os
+ import json
+ import random
+ from PIL import Image
+ import numpy as np
+ from datetime import datetime
+ 
+ import torch
+ from diffusers.utils import load_image
+ 
+ from pipeline_calligrapher import CalligrapherPipeline
+ from models.calligrapher import Calligrapher
+ from models.transformer_flux_inpainting import FluxTransformer2DModel
+ 
+ from utils import resize_img_and_pad, generate_context_reference_image
+ 
+ 
+ def infer_calligrapher(test_image_dir, result_save_dir,
+                        target_h=512, target_w=512,
+                        gen_num_per_case=2, load_prompt_from_txt=True):
+     # Set job dir.
+     job_name = datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
+     result_save_path = os.path.join(result_save_dir, job_name)
+     if not os.path.exists(result_save_path):
+         os.makedirs(result_save_path, exist_ok=True)
+ 
+     # Load models.
+     base_model_path = path_dict['base_model_path']
+     image_encoder_path = path_dict['image_encoder_path']
+     calligrapher_path = path_dict['calligrapher_path']
+     transformer = FluxTransformer2DModel.from_pretrained(
+         base_model_path, subfolder="transformer", torch_dtype=torch.bfloat16
+     )
+     pipe = CalligrapherPipeline.from_pretrained(base_model_path,
+                                                 transformer=transformer, torch_dtype=torch.bfloat16).to("cuda")
+     model = Calligrapher(pipe, image_encoder_path, calligrapher_path,
+                          device="cuda", num_tokens=128)
+ 
+     source_image_names = [i for i in os.listdir(test_image_dir) if 'source.png' in i]
+     # Load prompts from the bench txt file and print them.
+     if load_prompt_from_txt:
+         prompt_dict = {}
+         with open(os.path.join(test_image_dir, 'self_bench.txt'), 'r') as file:
+             for line in file:
+                 line = line.strip()
+                 if line:
+                     key, value = line.split('-', 1)
+                     prompt_dict[int(key)] = value
+ 
+         i = 0
+         print('Printing given prompts...')
+         for img_id in sorted(prompt_dict.keys()):
+             i += 1
+             text = prompt_dict[img_id]
+             print(f'Sample #{i}: {img_id} - {text}')
+ 
+     count = 0
+     for source_image_name in sorted(source_image_names):
+         count += 1
+         source_image_path = os.path.join(test_image_dir, source_image_name)
+         reference_image_name = source_image_name.replace('source', 'ref')
+         reference_image_path = os.path.join(test_image_dir, reference_image_name)
+         mask_image_name = source_image_name.replace('source', 'mask')
+         mask_image_path = os.path.join(test_image_dir, mask_image_name)
+ 
+         print('source_image_path: ', source_image_path)
+         print('mask_image_path: ', mask_image_path)
+         print('reference_image_path: ', reference_image_path)
+ 
+         if load_prompt_from_txt:
+             img_id = int(source_image_name.split("test")[1].split("_")[0])
+             text = prompt_dict[img_id]
+             prompt = f"The text is '{text}'."
+         else:
+             prompt = "The text is 'Generation'."
+         print(f'prompt: {prompt}')
+ 
+         source_image = load_image(source_image_path)
+         mask_image = load_image(mask_image_path)
+         # Resize source and mask.
+         source_image = source_image.resize((target_w, target_h))
+         mask_image = mask_image.resize((target_w, target_h), Image.NEAREST)
+         mask_np = np.array(mask_image)
+         mask_np[mask_np > 0] = 255
+         mask_image = Image.fromarray(mask_np.astype(np.uint8))
+         source_img_w, source_img_h = source_image.size
+ 
+         # Resize the reference to fit the image encoder.
+         reference_image = Image.open(reference_image_path).convert("RGB")
+         reference_image_to_encoder = resize_img_and_pad(reference_image, target_size=[512, 512])
+ 
+         reference_context = generate_context_reference_image(reference_image, source_img_w)
+         # Concatenate the context image on top.
+         source_with_context = Image.new(source_image.mode, (source_img_w, reference_context.size[1] + source_img_h))
+         source_with_context.paste(reference_context, (0, 0))
+         source_with_context.paste(source_image, (0, reference_context.size[1]))
+         # Concatenate an all-zero mask on top of the mask image.
+         mask_with_context = Image.new(mask_image.mode,
+                                       (mask_image.size[0], reference_context.size[1] + mask_image.size[1]), color=0)
+         mask_with_context.paste(mask_image, (0, reference_context.size[1]))
+ 
+         # Identifiers in filename.
+         ref_id = reference_image_name.split('_')[0]
+         safe_prompt = prompt.replace(" ", "_").replace("'", "").replace(",", "").replace('"', '').replace('?', '')[:50]
+         for i in range(gen_num_per_case):
+             seed = random.randint(0, 2 ** 32 - 1)
+             images = model.generate(
+                 image=source_with_context,
+                 mask_image=mask_with_context,
+                 ref_image=reference_image_to_encoder,
+                 prompt=prompt,
+                 scale=1.0,
+                 num_inference_steps=50,
+                 width=source_with_context.size[0],
+                 height=source_with_context.size[1],
+                 seed=seed,
+             )
+ 
+             index = len(os.listdir(result_save_path))
+             output_filename = f"result_{index}_{ref_id}_{safe_prompt}_{seed}.png"
+ 
+             result_img = images[0]
+             result_img_vis = result_img.crop((0, reference_context.size[1], result_img.width, result_img.height))
+             result_img_vis.save(os.path.join(result_save_path, output_filename))
+ 
+             target_size = (source_image.size[0], source_image.size[1])
+             vis_img = Image.new('RGB', (source_image.size[0] * 3, source_image.size[1]))
+             vis_img.paste(source_image.resize(target_size), (0, 0))
+             vis_img.paste(reference_context.resize(target_size), (source_image.size[0], 0))
+             vis_img.paste(result_img_vis.resize(target_size), (source_image.size[0] * 2, 0))
+             vis_img_save_path = os.path.join(result_save_path, f'vis_{output_filename}'.replace('.png', '.jpg'))
+             vis_img.save(vis_img_save_path)
+             print(f"Generated images saved to {vis_img_save_path}.")
+ 
+ 
+ if __name__ == '__main__':
+     with open(os.path.join(os.path.dirname(__file__), 'path_dict.json'), 'r') as f:
+         path_dict = json.load(f)
+     # Set directory paths.
+     test_image_dir = path_dict['data_dir']
+     result_save_dir = path_dict['cli_save_dir']
+     infer_calligrapher(test_image_dir, result_save_dir,
+                        target_h=512, target_w=512,
+                        gen_num_per_case=2, load_prompt_from_txt=True)
+     print('Finished!')
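
Both inference scripts share the same context trick: a reference strip is stacked above the source image, and the mask is padded with zeros over that strip so only the original source region is ever repainted. A distilled sketch of that step (assuming `context` comes from `generate_context_reference_image` in `utils.py`):

```py
from PIL import Image

def stack_context(source: Image.Image, mask: Image.Image, context: Image.Image):
    """Paste a context strip above the source; pad the mask with zeros over that strip."""
    w, h = source.size
    ch = context.size[1]
    source_ctx = Image.new(source.mode, (w, ch + h))
    source_ctx.paste(context, (0, 0))
    source_ctx.paste(source, (0, ch))
    mask_ctx = Image.new(mask.mode, (w, ch + h), color=0)  # context rows stay unmasked
    mask_ctx.paste(mask, (0, ch))
    return source_ctx, mask_ctx
```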
path_dict.json ADDED
@@ -0,0 +1,10 @@
+ {
+     "cli_save_dir": "./cli_exps/",
+     "gradio_save_dir": "./gradio_exps/",
+     "gradio_temp_dir": "./gradio_tmp",
+     "data_dir": "path/to/Calligrapher_bench_testing",
+     "base_model_path": "path/to/FLUX.1-Fill-dev",
+     "image_encoder_path": "google/siglip-so400m-patch14-384",
+     "calligrapher_path": "path/to/calligrapher.bin",
+     "textflux_path": "path/to/textflux-lora/pytorch_lora_weights.safetensors"
+ }
pipeline_calligrapher.py ADDED
@@ -0,0 +1,972 @@
+ # Copyright 2024 Black Forest Labs and The HuggingFace Team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ 
+ import inspect
+ from typing import Any, Callable, Dict, List, Optional, Union
+ 
+ import numpy as np
+ import torch
+ from transformers import CLIPTextModel, CLIPTokenizer, T5EncoderModel, T5TokenizerFast
+ 
+ from diffusers.image_processor import VaeImageProcessor
+ from diffusers.loaders import FluxLoraLoaderMixin, FromSingleFileMixin, TextualInversionLoaderMixin
+ from diffusers.models.autoencoders import AutoencoderKL
+ from models.transformer_flux_inpainting import FluxTransformer2DModel
+ from diffusers.schedulers import FlowMatchEulerDiscreteScheduler
+ from diffusers.utils import (
+     USE_PEFT_BACKEND,
+     is_torch_xla_available,
+     logging,
+     replace_example_docstring,
+     scale_lora_layers,
+     unscale_lora_layers,
+ )
+ from diffusers.utils.torch_utils import randn_tensor
+ from diffusers.pipelines.pipeline_utils import DiffusionPipeline
+ from diffusers.pipelines.flux.pipeline_output import FluxPipelineOutput
+ 
+ if is_torch_xla_available():
+     import torch_xla.core.xla_model as xm
+ 
+     XLA_AVAILABLE = True
+ else:
+     XLA_AVAILABLE = False
+ 
+ logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
+ 
+ EXAMPLE_DOC_STRING = """
+     Examples:
+         ```py
+         >>> import torch
+         >>> from diffusers import FluxFillPipeline
+         >>> from diffusers.utils import load_image
+ 
+         >>> image = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/cup.png")
+         >>> mask = load_image("https://huggingface.co/datasets/YiYiXu/testing-images/resolve/main/cup_mask.png")
+ 
+         >>> pipe = FluxFillPipeline.from_pretrained("black-forest-labs/FLUX.1-Fill-dev", torch_dtype=torch.bfloat16)
+         >>> pipe.enable_model_cpu_offload()  # save some VRAM by offloading the model to CPU
+ 
+         >>> image = pipe(
+         ...     prompt="a white paper cup",
+         ...     image=image,
+         ...     mask_image=mask,
+         ...     height=1632,
+         ...     width=1232,
+         ...     guidance_scale=30,
+         ...     num_inference_steps=50,
+         ...     max_sequence_length=512,
+         ...     generator=torch.Generator("cpu").manual_seed(0),
+         ... ).images[0]
+         >>> image.save("flux_fill.png")
+         ```
+ """
+ 
+ 
+ # Copied from diffusers.pipelines.flux.pipeline_flux.calculate_shift
+ def calculate_shift(
+     image_seq_len,
+     base_seq_len: int = 256,
+     max_seq_len: int = 4096,
+     base_shift: float = 0.5,
+     max_shift: float = 1.16,
+ ):
+     m = (max_shift - base_shift) / (max_seq_len - base_seq_len)
+     b = base_shift - m * base_seq_len
+     mu = image_seq_len * m + b
+     return mu
+ 
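+ # Worked example (illustrative): with the defaults, m = (1.16 - 0.5) / (4096 - 256) = 0.66 / 3840,
+ # so an image_seq_len of 1024 gives mu = 0.5 + (0.66 / 3840) * (1024 - 256) ≈ 0.632; mu interpolates
+ # linearly from base_shift at 256 tokens up to max_shift at 4096 tokens.
+ 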
+ 
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
+ def retrieve_timesteps(
+     scheduler,
+     num_inference_steps: Optional[int] = None,
+     device: Optional[Union[str, torch.device]] = None,
+     timesteps: Optional[List[int]] = None,
+     sigmas: Optional[List[float]] = None,
+     **kwargs,
+ ):
+     r"""
+     Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
+     custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
+ 
+     Args:
+         scheduler (`SchedulerMixin`):
+             The scheduler to get timesteps from.
+         num_inference_steps (`int`):
+             The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
+             must be `None`.
+         device (`str` or `torch.device`, *optional*):
+             The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
+         timesteps (`List[int]`, *optional*):
+             Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed,
+             `num_inference_steps` and `sigmas` must be `None`.
+         sigmas (`List[float]`, *optional*):
+             Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed,
+             `num_inference_steps` and `timesteps` must be `None`.
+ 
+     Returns:
+         `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
+         second element is the number of inference steps.
+     """
+     if timesteps is not None and sigmas is not None:
+         raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
+     if timesteps is not None:
+         accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
+         if not accepts_timesteps:
+             raise ValueError(
+                 f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
+                 f" timestep schedules. Please check whether you are using the correct scheduler."
+             )
+         scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
+         timesteps = scheduler.timesteps
+         num_inference_steps = len(timesteps)
+     elif sigmas is not None:
+         accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
+         if not accept_sigmas:
+             raise ValueError(
+                 f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
+                 f" sigmas schedules. Please check whether you are using the correct scheduler."
+             )
+         scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
+         timesteps = scheduler.timesteps
+         num_inference_steps = len(timesteps)
+     else:
+         scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
+         timesteps = scheduler.timesteps
+     return timesteps, num_inference_steps
+ 
+ 
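+ # Usage sketch (illustrative): the pipeline below calls this as
+ #     timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, sigmas=sigmas, mu=mu)
+ # with at most one of `timesteps`/`sigmas` overriding the scheduler's default spacing.
+ 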
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents
+ def retrieve_latents(
+     encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
+ ):
+     if hasattr(encoder_output, "latent_dist") and sample_mode == "sample":
+         return encoder_output.latent_dist.sample(generator)
+     elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax":
+         return encoder_output.latent_dist.mode()
+     elif hasattr(encoder_output, "latents"):
+         return encoder_output.latents
+     else:
+         raise AttributeError("Could not access latents of provided encoder_output")
+ 
+ 
+ class CalligrapherPipeline(
+     DiffusionPipeline,
+     FluxLoraLoaderMixin,
+     FromSingleFileMixin,
+     TextualInversionLoaderMixin,
+ ):
+     r"""
+     The Flux Fill pipeline for image inpainting/outpainting.
+ 
+     Reference: https://blackforestlabs.ai/flux-1-tools/
+ 
+     Args:
+         transformer ([`FluxTransformer2DModel`]):
+             Conditional Transformer (MMDiT) architecture to denoise the encoded image latents.
+         scheduler ([`FlowMatchEulerDiscreteScheduler`]):
+             A scheduler to be used in combination with `transformer` to denoise the encoded image latents.
+         vae ([`AutoencoderKL`]):
+             Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
+         text_encoder ([`CLIPTextModel`]):
+             [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
+             the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
+         text_encoder_2 ([`T5EncoderModel`]):
+             [T5](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5EncoderModel), specifically
+             the [google/t5-v1_1-xxl](https://huggingface.co/google/t5-v1_1-xxl) variant.
+         tokenizer (`CLIPTokenizer`):
+             Tokenizer of class
+             [CLIPTokenizer](https://huggingface.co/docs/transformers/en/model_doc/clip#transformers.CLIPTokenizer).
+         tokenizer_2 (`T5TokenizerFast`):
+             Second Tokenizer of class
+             [T5TokenizerFast](https://huggingface.co/docs/transformers/en/model_doc/t5#transformers.T5TokenizerFast).
+     """
+ 
+     model_cpu_offload_seq = "text_encoder->text_encoder_2->transformer->vae"
+     _optional_components = []
+     _callback_tensor_inputs = ["latents", "prompt_embeds"]
+ 
+     def __init__(
+         self,
+         scheduler: FlowMatchEulerDiscreteScheduler,
+         vae: AutoencoderKL,
+         text_encoder: CLIPTextModel,
+         tokenizer: CLIPTokenizer,
+         text_encoder_2: T5EncoderModel,
+         tokenizer_2: T5TokenizerFast,
+         transformer: FluxTransformer2DModel,
+     ):
+         super().__init__()
+ 
+         print('Using scheduler: ', scheduler)
+ 
+         self.register_modules(
+             vae=vae,
+             text_encoder=text_encoder,
+             text_encoder_2=text_encoder_2,
+             tokenizer=tokenizer,
+             tokenizer_2=tokenizer_2,
+             transformer=transformer,
+             scheduler=scheduler,
+         )
+         self.vae_scale_factor = (
+             2 ** (len(self.vae.config.block_out_channels) - 1) if hasattr(self, "vae") and self.vae is not None else 8
+         )
+         # Flux latents are turned into 2x2 patches and packed. This means the latent width and height have to be
+         # divisible by the patch size, so the vae scale factor is multiplied by the patch size to account for this.
+         self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor * 2)
+         self.mask_processor = VaeImageProcessor(
+             vae_scale_factor=self.vae_scale_factor * 2,
+             vae_latent_channels=self.vae.config.latent_channels,
+             do_normalize=False,
+             do_binarize=True,
+             do_convert_grayscale=True,
+         )
+         self.tokenizer_max_length = (
+             self.tokenizer.model_max_length if hasattr(self, "tokenizer") and self.tokenizer is not None else 77
+         )
+         self.default_sample_size = 128
+ 
+     # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline._get_t5_prompt_embeds
+     def _get_t5_prompt_embeds(
+         self,
+         prompt: Union[str, List[str]] = None,
+         num_images_per_prompt: int = 1,
+         max_sequence_length: int = 512,
+         device: Optional[torch.device] = None,
+         dtype: Optional[torch.dtype] = None,
+     ):
+         device = device or self._execution_device
+         dtype = dtype or self.text_encoder.dtype
+ 
+         prompt = [prompt] if isinstance(prompt, str) else prompt
+         batch_size = len(prompt)
+ 
+         if isinstance(self, TextualInversionLoaderMixin):
+             prompt = self.maybe_convert_prompt(prompt, self.tokenizer_2)
+ 
+         text_inputs = self.tokenizer_2(
+             prompt,
+             padding="max_length",
+             max_length=max_sequence_length,
+             truncation=True,
+             return_length=False,
+             return_overflowing_tokens=False,
+             return_tensors="pt",
+         )
+         text_input_ids = text_inputs.input_ids
+         untruncated_ids = self.tokenizer_2(prompt, padding="longest", return_tensors="pt").input_ids
+ 
+         if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
+             removed_text = self.tokenizer_2.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1: -1])
+             logger.warning(
+                 "The following part of your input was truncated because `max_sequence_length` is set to "
+                 f" {max_sequence_length} tokens: {removed_text}"
+             )
+ 
+         prompt_embeds = self.text_encoder_2(text_input_ids.to(device), output_hidden_states=False)[0]
+ 
+         dtype = self.text_encoder_2.dtype
+         prompt_embeds = prompt_embeds.to(dtype=dtype, device=device)
+ 
+         _, seq_len, _ = prompt_embeds.shape
+ 
+         # duplicate text embeddings and attention mask for each generation per prompt, using mps friendly method
+         prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
+         prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
+ 
+         return prompt_embeds
+ 
+     # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline._get_clip_prompt_embeds
+     def _get_clip_prompt_embeds(
+         self,
+         prompt: Union[str, List[str]],
+         num_images_per_prompt: int = 1,
+         device: Optional[torch.device] = None,
+     ):
+         device = device or self._execution_device
+ 
+         prompt = [prompt] if isinstance(prompt, str) else prompt
+         batch_size = len(prompt)
+ 
+         if isinstance(self, TextualInversionLoaderMixin):
+             prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
+ 
+         text_inputs = self.tokenizer(
+             prompt,
+             padding="max_length",
+             max_length=self.tokenizer_max_length,
+             truncation=True,
+             return_overflowing_tokens=False,
+             return_length=False,
+             return_tensors="pt",
+         )
+ 
+         text_input_ids = text_inputs.input_ids
+         untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
+         if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
+             removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1: -1])
+             logger.warning(
+                 "The following part of your input was truncated because CLIP can only handle sequences up to"
+                 f" {self.tokenizer_max_length} tokens: {removed_text}"
+             )
+         prompt_embeds = self.text_encoder(text_input_ids.to(device), output_hidden_states=False)
+ 
+         # Use pooled output of CLIPTextModel
+         prompt_embeds = prompt_embeds.pooler_output
+         prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
+ 
+         # duplicate text embeddings for each generation per prompt, using mps friendly method
+         prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt)
+         prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, -1)
+ 
+         return prompt_embeds
+ 
+     def prepare_mask_latents(
+         self,
+         mask,
+         masked_image,
+         batch_size,
+         num_channels_latents,
+         num_images_per_prompt,
+         height,
+         width,
+         dtype,
+         device,
+         generator
+     ):
+         # 1. calculate the height and width of the latents
+         # VAE applies 8x compression on images but we must also account for packing which requires
+         # latent height and width to be divisible by 2.
+         height = 2 * (int(height) // (self.vae_scale_factor * 2))
+         width = 2 * (int(width) // (self.vae_scale_factor * 2))
+         # 2. encode the masked image
+         if masked_image.shape[1] == num_channels_latents:
+             masked_image_latents = masked_image
+         else:
+             masked_image_latents = retrieve_latents(self.vae.encode(masked_image), generator=generator)
+ 
+         masked_image_latents = (masked_image_latents - self.vae.config.shift_factor) * self.vae.config.scaling_factor
+         masked_image_latents = masked_image_latents.to(device=device, dtype=dtype)
+ 
+         # 3. duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method
+         batch_size = batch_size * num_images_per_prompt
+         if mask.shape[0] < batch_size:
+             if not batch_size % mask.shape[0] == 0:
+                 raise ValueError(
+                     "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to"
+                     f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number"
+                     " of masks that you pass is divisible by the total requested batch size."
+                 )
+             mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1)
+         if masked_image_latents.shape[0] < batch_size:
+             if not batch_size % masked_image_latents.shape[0] == 0:
+                 raise ValueError(
+                     "The passed images and the required batch size don't match. Images are supposed to be duplicated"
+                     f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed."
+                     " Make sure the number of images that you pass is divisible by the total requested batch size."
+                 )
+             masked_image_latents = masked_image_latents.repeat(batch_size // masked_image_latents.shape[0], 1, 1, 1)
+ 
+         # 4. pack the masked_image_latents
+         # batch_size, num_channels_latents, height, width -> batch_size, height//2 * width//2 , num_channels_latents*4
+         masked_image_latents = self._pack_latents(
+             masked_image_latents,
+             batch_size,
+             num_channels_latents,
+             height,
+             width,
+         )
+ 
+         # 5. resize mask to latents shape as we concatenate the mask to the latents
+         mask = mask[:, 0, :, :]  # batch_size, 8 * height, 8 * width (mask has not been 8x compressed)
+         mask = mask.view(
+             batch_size, height, self.vae_scale_factor, width, self.vae_scale_factor
+         )  # batch_size, height, 8, width, 8
+         mask = mask.permute(0, 2, 4, 1, 3)  # batch_size, 8, 8, height, width
+         mask = mask.reshape(
+             batch_size, self.vae_scale_factor * self.vae_scale_factor, height, width
+         )  # batch_size, 8*8, height, width
+ 
+         # 6. pack the mask:
+         # batch_size, 64, height, width -> batch_size, height//2 * width//2 , 64*2*2
+         mask = self._pack_latents(
+             mask,
+             batch_size,
+             self.vae_scale_factor * self.vae_scale_factor,
+             height,
+             width,
+         )
+         mask = mask.to(device=device, dtype=dtype)
+ 
+         return mask, masked_image_latents
+ 
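+     # Shape trace (illustrative) for a 512x512 input with vae_scale_factor == 8: the latent grid is
+     # 64x64, step 5 unfolds the pixel-space mask to (batch, 64, 64, 64), and step 6 packs it to
+     # (batch, 32 * 32, 256), i.e. one 256-dim token per 2x2 latent patch.
+ 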
416
+ # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline.encode_prompt
417
+ def encode_prompt(
418
+ self,
419
+ prompt: Union[str, List[str]],
420
+ prompt_2: Union[str, List[str]],
421
+ device: Optional[torch.device] = None,
422
+ num_images_per_prompt: int = 1,
423
+ prompt_embeds: Optional[torch.FloatTensor] = None,
424
+ pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
425
+ max_sequence_length: int = 512,
426
+ lora_scale: Optional[float] = None,
427
+ ):
428
+ r"""
429
+
430
+ Args:
431
+ prompt (`str` or `List[str]`, *optional*):
432
+ prompt to be encoded
433
+ prompt_2 (`str` or `List[str]`, *optional*):
434
+ The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
435
+ used in all text-encoders
436
+ device: (`torch.device`):
437
+ torch device
438
+ num_images_per_prompt (`int`):
439
+ number of images that should be generated per prompt
440
+ prompt_embeds (`torch.FloatTensor`, *optional*):
441
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
442
+ provided, text embeddings will be generated from `prompt` input argument.
443
+ pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
444
+ Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
445
+ If not provided, pooled text embeddings will be generated from `prompt` input argument.
446
+ lora_scale (`float`, *optional*):
447
+ A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
448
+ """
449
+ device = device or self._execution_device
450
+
451
+ # set lora scale so that monkey patched LoRA
452
+ # function of text encoder can correctly access it
453
+ if lora_scale is not None and isinstance(self, FluxLoraLoaderMixin):
454
+ self._lora_scale = lora_scale
455
+
456
+ # dynamically adjust the LoRA scale
457
+ if self.text_encoder is not None and USE_PEFT_BACKEND:
458
+ scale_lora_layers(self.text_encoder, lora_scale)
459
+ if self.text_encoder_2 is not None and USE_PEFT_BACKEND:
460
+ scale_lora_layers(self.text_encoder_2, lora_scale)
461
+
462
+ prompt = [prompt] if isinstance(prompt, str) else prompt
463
+
464
+ if prompt_embeds is None:
465
+ prompt_2 = prompt_2 or prompt
466
+ prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2
467
+
468
+ # We only use the pooled prompt output from the CLIPTextModel
469
+ pooled_prompt_embeds = self._get_clip_prompt_embeds(
470
+ prompt=prompt,
471
+ device=device,
472
+ num_images_per_prompt=num_images_per_prompt,
473
+ )
474
+ prompt_embeds = self._get_t5_prompt_embeds(
475
+ prompt=prompt_2,
476
+ num_images_per_prompt=num_images_per_prompt,
477
+ max_sequence_length=max_sequence_length,
478
+ device=device,
479
+ )
480
+
481
+ if self.text_encoder is not None:
482
+ if isinstance(self, FluxLoraLoaderMixin) and USE_PEFT_BACKEND:
483
+ # Retrieve the original scale by scaling back the LoRA layers
484
+ unscale_lora_layers(self.text_encoder, lora_scale)
485
+
486
+ if self.text_encoder_2 is not None:
487
+ if isinstance(self, FluxLoraLoaderMixin) and USE_PEFT_BACKEND:
488
+ # Retrieve the original scale by scaling back the LoRA layers
489
+ unscale_lora_layers(self.text_encoder_2, lora_scale)
490
+
491
+ dtype = self.text_encoder.dtype if self.text_encoder is not None else self.transformer.dtype
492
+ text_ids = torch.zeros(prompt_embeds.shape[1], 3).to(device=device, dtype=dtype)
493
+
494
+ return prompt_embeds, pooled_prompt_embeds, text_ids
495
+
496
+ def check_inputs(
497
+ self,
498
+ prompt,
499
+ prompt_2,
500
+ height,
501
+ width,
502
+ prompt_embeds=None,
503
+ pooled_prompt_embeds=None,
504
+ callback_on_step_end_tensor_inputs=None,
505
+ max_sequence_length=None,
506
+ image=None,
507
+ mask_image=None,
508
+ masked_image_latents=None,
509
+ ):
510
+ if height % (self.vae_scale_factor * 2) != 0 or width % (self.vae_scale_factor * 2) != 0:
511
+ logger.warning(
512
+ f"`height` and `width` have to be divisible by {self.vae_scale_factor * 2} but are {height} and {width}. Dimensions will be resized accordingly"
513
+ )
514
+
515
+ if callback_on_step_end_tensor_inputs is not None and not all(
516
+ k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
517
+ ):
518
+ raise ValueError(
519
+ f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
520
+ )
521
+
522
+ if prompt is not None and prompt_embeds is not None:
523
+ raise ValueError(
524
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
525
+ " only forward one of the two."
526
+ )
527
+ elif prompt_2 is not None and prompt_embeds is not None:
528
+ raise ValueError(
529
+ f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
530
+ " only forward one of the two."
531
+ )
532
+ elif prompt is None and prompt_embeds is None:
533
+ raise ValueError(
534
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
535
+ )
536
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
537
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
538
+ elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)):
539
+ raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}")
540
+
541
+ if prompt_embeds is not None and pooled_prompt_embeds is None:
542
+ raise ValueError(
543
+ "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`."
544
+ )
545
+
546
+ if max_sequence_length is not None and max_sequence_length > 512:
547
+ raise ValueError(f"`max_sequence_length` cannot be greater than 512 but is {max_sequence_length}")
548
+
549
+ if image is not None and masked_image_latents is not None:
550
+ raise ValueError(
551
+ "Please provide either `image` or `masked_image_latents`, `masked_image_latents` should not be passed."
552
+ )
553
+
554
+ if image is not None and mask_image is None:
555
+ raise ValueError("Please provide `mask_image` when passing `image`.")
556
+
557
+ @staticmethod
558
+ # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline._prepare_latent_image_ids
559
+ def _prepare_latent_image_ids(batch_size, height, width, device, dtype):
560
+ latent_image_ids = torch.zeros(height, width, 3)
561
+ latent_image_ids[..., 1] = latent_image_ids[..., 1] + torch.arange(height)[:, None]
562
+ latent_image_ids[..., 2] = latent_image_ids[..., 2] + torch.arange(width)[None, :]
563
+
564
+ latent_image_id_height, latent_image_id_width, latent_image_id_channels = latent_image_ids.shape
565
+
566
+ latent_image_ids = latent_image_ids.reshape(
567
+ latent_image_id_height * latent_image_id_width, latent_image_id_channels
568
+ )
569
+
570
+ return latent_image_ids.to(device=device, dtype=dtype)
571
+
572
+ @staticmethod
573
+ # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline._pack_latents
574
+ def _pack_latents(latents, batch_size, num_channels_latents, height, width):
575
+ latents = latents.view(batch_size, num_channels_latents, height // 2, 2, width // 2, 2)
576
+ latents = latents.permute(0, 2, 4, 1, 3, 5)
577
+ latents = latents.reshape(batch_size, (height // 2) * (width // 2), num_channels_latents * 4)
578
+
579
+ return latents
580
+
581
+ @staticmethod
582
+ # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline._unpack_latents
583
+ def _unpack_latents(latents, height, width, vae_scale_factor):
584
+ batch_size, num_patches, channels = latents.shape
585
+
586
+ # VAE applies 8x compression on images but we must also account for packing which requires
587
+ # latent height and width to be divisible by 2.
588
+ height = 2 * (int(height) // (vae_scale_factor * 2))
589
+ width = 2 * (int(width) // (vae_scale_factor * 2))
590
+
591
+ latents = latents.view(batch_size, height // 2, width // 2, channels // 4, 2, 2)
592
+ latents = latents.permute(0, 3, 1, 4, 2, 5)
593
+
594
+ latents = latents.reshape(batch_size, channels // (2 * 2), height, width)
595
+
596
+ return latents
597
+
598
+ def enable_vae_slicing(self):
599
+ r"""
600
+ Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
601
+ compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
602
+ """
603
+ self.vae.enable_slicing()
604
+
605
+ def disable_vae_slicing(self):
606
+ r"""
607
+ Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
608
+ computing decoding in one step.
609
+ """
610
+ self.vae.disable_slicing()
611
+
612
+ def enable_vae_tiling(self):
613
+ r"""
614
+ Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
615
+ compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to allow
616
+ processing larger images.
617
+ """
618
+ self.vae.enable_tiling()
619
+
620
+ def disable_vae_tiling(self):
621
+ r"""
622
+ Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
623
+ computing decoding in one step.
624
+ """
625
+ self.vae.disable_tiling()
626
+
627
+ # Copied from diffusers.pipelines.flux.pipeline_flux.FluxPipeline.prepare_latents
628
+ def prepare_latents(
629
+ self,
630
+ batch_size,
631
+ num_channels_latents,
632
+ height,
633
+ width,
634
+ dtype,
635
+ device,
636
+ generator,
637
+ latents=None,
638
+ ):
639
+ # VAE applies 8x compression on images but we must also account for packing which requires
640
+ # latent height and width to be divisible by 2.
641
+ height = 2 * (int(height) // (self.vae_scale_factor * 2))
642
+ width = 2 * (int(width) // (self.vae_scale_factor * 2))
643
+
644
+ shape = (batch_size, num_channels_latents, height, width)
645
+
646
+ if latents is not None:
647
+ latent_image_ids = self._prepare_latent_image_ids(batch_size, height // 2, width // 2, device, dtype)
648
+ return latents.to(device=device, dtype=dtype), latent_image_ids
649
+
650
+ if isinstance(generator, list) and len(generator) != batch_size:
651
+ raise ValueError(
652
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
653
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
654
+ )
655
+
656
+ latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
657
+ latents = self._pack_latents(latents, batch_size, num_channels_latents, height, width)
658
+
659
+ latent_image_ids = self._prepare_latent_image_ids(batch_size, height // 2, width // 2, device, dtype)
660
+
661
+ return latents, latent_image_ids
662
+
663
+ @property
664
+ def guidance_scale(self):
665
+ return self._guidance_scale
666
+
667
+ @property
668
+ def joint_attention_kwargs(self):
669
+ return self._joint_attention_kwargs
670
+
671
+ @property
672
+ def num_timesteps(self):
673
+ return self._num_timesteps
674
+
675
+ @property
676
+ def interrupt(self):
677
+ return self._interrupt
678
+
679
+ @torch.no_grad()
680
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
681
+ def __call__(
682
+ self,
683
+ prompt: Union[str, List[str]] = None,
684
+ prompt_2: Optional[Union[str, List[str]]] = None,
685
+ image: Optional[torch.FloatTensor] = None,
686
+ mask_image: Optional[torch.FloatTensor] = None,
687
+ masked_image_latents: Optional[torch.FloatTensor] = None,
688
+ height: Optional[int] = None,
689
+ width: Optional[int] = None,
690
+ num_inference_steps: int = 50,
691
+ sigmas: Optional[List[float]] = None,
692
+ guidance_scale: float = 30.0,
693
+ num_images_per_prompt: Optional[int] = 1,
694
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
695
+ latents: Optional[torch.FloatTensor] = None,
696
+ prompt_embeds: Optional[torch.FloatTensor] = None,
697
+ pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
698
+ image_emb: Optional[torch.FloatTensor] = None,
699
+ output_type: Optional[str] = "pil",
700
+ return_dict: bool = True,
701
+ joint_attention_kwargs: Optional[Dict[str, Any]] = None,
702
+ callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
703
+ callback_on_step_end_tensor_inputs: List[str] = ["latents"],
704
+ max_sequence_length: int = 512,
705
+ ):
706
+ r"""
707
+ Function invoked when calling the pipeline for generation.
708
+
709
+ Args:
710
+ prompt (`str` or `List[str]`, *optional*):
711
+ The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.
712
+ instead.
713
+ prompt_2 (`str` or `List[str]`, *optional*):
714
+ The prompt or prompts to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
715
+ will be used instead
716
+ image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`):
717
+ `Image`, numpy array or tensor representing an image batch to be used as the starting point. For both
718
+ numpy array and pytorch tensor, the expected value range is between `[0, 1]` If it's a tensor or a list
719
+ or tensors, the expected shape should be `(B, C, H, W)` or `(C, H, W)`. If it is a numpy array or a
720
+ list of arrays, the expected shape should be `(B, H, W, C)` or `(H, W, C)`.
721
+ mask_image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`, or `List[np.ndarray]`):
722
+ `Image`, numpy array or tensor representing an image batch to mask `image`. White pixels in the mask
723
+ are repainted while black pixels are preserved. If `mask_image` is a PIL image, it is converted to a
724
+ single channel (luminance) before use. If it's a numpy array or pytorch tensor, it should contain one
725
+ color channel (L) instead of 3, so the expected shape for pytorch tensor would be `(B, 1, H, W)`, `(B,
726
+ H, W)`, `(1, H, W)`, `(H, W)`. And for numpy array would be for `(B, H, W, 1)`, `(B, H, W)`, `(H, W,
727
+ 1)`, or `(H, W)`.
728
+ mask_image_latent (`torch.Tensor`, `List[torch.Tensor]`):
729
+ `Tensor` representing an image batch to mask `image` generated by VAE. If not provided, the mask
730
+ latents tensor will ge generated by `mask_image`.
731
+ use_context (`bool`):
732
+ Whether to use the context image to guide the customization.
733
+ height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
734
+ The height in pixels of the generated image. This is set to 1024 by default for the best results.
735
+ width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
736
+ The width in pixels of the generated image. This is set to 1024 by default for the best results.
737
+ num_inference_steps (`int`, *optional*, defaults to 50):
738
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
739
+ expense of slower inference.
740
+ sigmas (`List[float]`, *optional*):
741
+ Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in
742
+ their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed
743
+ will be used.
744
+ guidance_scale (`float`, *optional*, defaults to 7.0):
745
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
746
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
747
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
748
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
749
+ usually at the expense of lower image quality.
750
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
751
+ The number of images to generate per prompt.
752
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
753
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
754
+ to make generation deterministic.
755
+ latents (`torch.FloatTensor`, *optional*):
756
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
757
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
758
+ tensor will ge generated by sampling using the supplied random `generator`.
759
+ prompt_embeds (`torch.FloatTensor`, *optional*):
760
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
761
+ provided, text embeddings will be generated from `prompt` input argument.
762
+ pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
763
+ Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
764
+ If not provided, pooled text embeddings will be generated from `prompt` input argument.
765
+ output_type (`str`, *optional*, defaults to `"pil"`):
766
+ The output format of the generate image. Choose between
767
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
768
+ return_dict (`bool`, *optional*, defaults to `True`):
769
+ Whether or not to return a [`~pipelines.flux.FluxPipelineOutput`] instead of a plain tuple.
770
+ joint_attention_kwargs (`dict`, *optional*):
771
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
772
+ `self.processor` in
773
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
774
+ callback_on_step_end (`Callable`, *optional*):
775
+ A function that calls at the end of each denoising steps during the inference. The function is called
776
+ with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
777
+ callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
778
+ `callback_on_step_end_tensor_inputs`.
779
+ callback_on_step_end_tensor_inputs (`List`, *optional*):
780
+ The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
781
+ will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
782
+ `._callback_tensor_inputs` attribute of your pipeline class.
783
+ max_sequence_length (`int` defaults to 512): Maximum sequence length to use with the `prompt`.
784
+
785
+ Examples:
786
+
787
+ Returns:
788
+ [`~pipelines.flux.FluxPipelineOutput`] or `tuple`: [`~pipelines.flux.FluxPipelineOutput`] if `return_dict`
789
+ is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the generated
790
+ images.
791
+ """
792
+
793
+ height = height or self.default_sample_size * self.vae_scale_factor
794
+ width = width or self.default_sample_size * self.vae_scale_factor
795
+
796
+ # 1. Check inputs. Raise error if not correct
797
+ self.check_inputs(
798
+ prompt,
799
+ prompt_2,
800
+ height,
801
+ width,
802
+ prompt_embeds=prompt_embeds,
803
+ pooled_prompt_embeds=pooled_prompt_embeds,
804
+ callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs,
805
+ max_sequence_length=max_sequence_length,
806
+ image=image,
807
+ mask_image=mask_image,
808
+ masked_image_latents=masked_image_latents,
809
+ )
810
+
811
+ self._guidance_scale = guidance_scale
812
+ self._joint_attention_kwargs = joint_attention_kwargs
813
+ self._interrupt = False
814
+
815
+ # 2. Define call parameters
816
+ if prompt is not None and isinstance(prompt, str):
817
+ batch_size = 1
818
+ elif prompt is not None and isinstance(prompt, list):
819
+ batch_size = len(prompt)
820
+ else:
821
+ batch_size = prompt_embeds.shape[0]
822
+
823
+ device = self._execution_device
824
+
825
+ # 3. Prepare prompt embeddings
826
+ lora_scale = (
827
+             self.joint_attention_kwargs.get("scale", None) if self.joint_attention_kwargs is not None else None
+         )
+         (
+             prompt_embeds,
+             pooled_prompt_embeds,
+             text_ids,
+         ) = self.encode_prompt(
+             prompt=prompt,
+             prompt_2=prompt_2,
+             prompt_embeds=prompt_embeds,
+             pooled_prompt_embeds=pooled_prompt_embeds,
+             device=device,
+             num_images_per_prompt=num_images_per_prompt,
+             max_sequence_length=max_sequence_length,
+             lora_scale=lora_scale,
+         )
+
+         # 4. Prepare latent variables
+         num_channels_latents = self.vae.config.latent_channels
+
+         latents, latent_image_ids = self.prepare_latents(
+             batch_size * num_images_per_prompt,
+             num_channels_latents,
+             height,
+             width,
+             prompt_embeds.dtype,
+             device,
+             generator,
+             latents,
+         )
+
+         # 5. Prepare mask and masked image latents
+         if masked_image_latents is not None:
+             masked_image_latents = masked_image_latents.to(latents.device)
+         else:
+             image = self.image_processor.preprocess(image, height=height, width=width)
+             mask_image = self.mask_processor.preprocess(mask_image, height=height, width=width)
+
+             masked_image = image * (1 - mask_image)
+             masked_image = masked_image.to(device=device, dtype=prompt_embeds.dtype)
+
+             height, width = image.shape[-2:]
+             mask, masked_image_latents = self.prepare_mask_latents(
+                 mask_image,
+                 masked_image,
+                 batch_size,
+                 num_channels_latents,
+                 num_images_per_prompt,
+                 height,
+                 width,
+                 prompt_embeds.dtype,
+                 device,
+                 generator,
+             )
+             # Concatenate the masked-image latents and the mask along the channel dimension.
+             masked_image_latents = torch.cat((masked_image_latents, mask), dim=-1)
+
+         # 6. Prepare timesteps
+         sigmas = np.linspace(1.0, 1 / num_inference_steps, num_inference_steps) if sigmas is None else sigmas
+         image_seq_len = latents.shape[1]
+         mu = calculate_shift(
+             image_seq_len,
+             self.scheduler.config.base_image_seq_len,
+             self.scheduler.config.max_image_seq_len,
+             self.scheduler.config.base_shift,
+             self.scheduler.config.max_shift,
+         )
+         timesteps, num_inference_steps = retrieve_timesteps(
+             self.scheduler,
+             num_inference_steps,
+             device,
+             sigmas=sigmas,
+             mu=mu,
+         )
+         num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
+         self._num_timesteps = len(timesteps)
+
+         # Handle guidance for guidance-distilled transformer variants.
+         if self.transformer.config.guidance_embeds:
+             guidance = torch.full([1], guidance_scale, device=device, dtype=torch.float32)
+             guidance = guidance.expand(latents.shape[0])
+         else:
+             guidance = None
+
+         # 7. Denoising loop
+         with self.progress_bar(total=num_inference_steps) as progress_bar:
+             for i, t in enumerate(timesteps):
+                 if self.interrupt:
+                     continue
+                 # Condition every step on the masked-image latents.
+                 cond_latents = masked_image_latents
+                 # Broadcast to batch dimension in a way that's compatible with ONNX/Core ML.
+                 timestep = t.expand(latents.shape[0]).to(latents.dtype)
+                 noise_pred = self.transformer(
+                     hidden_states=torch.cat((latents, cond_latents), dim=2),
+                     timestep=timestep / 1000,
+                     guidance=guidance,
+                     pooled_projections=pooled_prompt_embeds,
+                     encoder_hidden_states=prompt_embeds,
+                     image_emb=image_emb,
+                     txt_ids=text_ids,
+                     img_ids=latent_image_ids,
+                     joint_attention_kwargs=self.joint_attention_kwargs,
+                     return_dict=False,
+                 )[0]
+
+                 # Compute the previous noisy sample x_t -> x_t-1.
+                 latents_dtype = latents.dtype
+                 latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0]
+
+                 if latents.dtype != latents_dtype:
+                     if torch.backends.mps.is_available():
+                         # Some platforms (e.g. Apple MPS) misbehave due to a PyTorch bug: https://github.com/pytorch/pytorch/pull/99272
+                         latents = latents.to(latents_dtype)
+
+                 if callback_on_step_end is not None:
+                     callback_kwargs = {}
+                     for k in callback_on_step_end_tensor_inputs:
+                         callback_kwargs[k] = locals()[k]
+                     callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
+
+                     latents = callback_outputs.pop("latents", latents)
+                     prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
+
+                 # Update the progress bar.
+                 if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
+                     progress_bar.update()
+
+                 if XLA_AVAILABLE:
+                     xm.mark_step()
+
+         # 8. Post-process the image
+         if output_type == "latent":
+             image = latents
+         else:
+             latents = self._unpack_latents(latents, height, width, self.vae_scale_factor)
+             latents = (latents / self.vae.config.scaling_factor) + self.vae.config.shift_factor
+             image = self.vae.decode(latents, return_dict=False)[0]
+             image = self.image_processor.postprocess(image, output_type=output_type)
+
+         # Offload all models
+         self.maybe_free_model_hooks()
+
+         if not return_dict:
+             return (image,)
+
+         return FluxPipelineOutput(images=image)
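
For orientation, a minimal sketch of driving this `__call__`; the loader name and file paths below are placeholders (the commit's actual entry point, and the preparation of the style-reference embedding `image_emb`, live elsewhere in this upload), while the keyword arguments are the ones visible in the code above:

    import torch
    from PIL import Image

    # Hypothetical loader; not part of this commit.
    pipe = CalligrapherPipeline.from_pretrained("path/to/checkpoint", torch_dtype=torch.bfloat16).to("cuda")

    result = pipe(
        prompt="The text 'Calligrapher' in an elegant brush style",
        image=Image.open("scene.png"),       # source image to edit
        mask_image=Image.open("mask.png"),   # white marks the region to re-render
        num_inference_steps=30,
        guidance_scale=30.0,
        generator=torch.Generator("cuda").manual_seed(42),
    )
    result.images[0].save("result.png")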
requirements.txt ADDED
@@ -0,0 +1,11 @@
+ torch==2.5.0
+ torchvision==0.20.0
+ diffusers==0.33.0
+ transformers==4.49.0
+ Pillow==11.0.0
+ numpy==1.26.4
+ opencv-python==4.11.0.86
+ accelerate==0.30.1
+ gradio==5.23.3
+ protobuf==5.28.3
+ sentencepiece==0.2.0
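
The pins above can be sanity-checked after installation with a short snippet (a sketch, not part of the repo):

    from importlib.metadata import version

    for pkg in ["torch", "torchvision", "diffusers", "transformers", "gradio"]:
        print(pkg, version(pkg))  # should print the versions pinned above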
utils.py ADDED
@@ -0,0 +1,125 @@
+ """
+ Helper functions mainly for data preparation.
+ """
+ import cv2
+ import numpy as np
+ from PIL import Image
+
+
+ # Extract the image and mask from a gr.ImageEditor value.
+ def process_gradio_source(editor_data):
+     original_image = editor_data["background"].convert("RGB")
+     layers = editor_data.get("layers", [])
+
+     full_mask = Image.new("L", original_image.size, 0)
+     cropped_region = original_image.copy()
+
+     if not layers:
+         return original_image, full_mask, cropped_region
+
+     try:
+         layer_image = layers[0]
+         layer_pos = (0, 0)
+
+         if layer_image.mode != "RGBA":
+             layer_image = layer_image.convert("RGBA")
+
+         # The layer's alpha channel marks the strokes the user drew.
+         alpha_channel = layer_image.split()[-1]
+
+         full_mask = Image.new("L", original_image.size, 0)
+         full_mask.paste(alpha_channel, layer_pos)
+         full_mask = full_mask.point(lambda p: 255 if p > 128 else 0)
+
+         original_np = np.array(original_image)
+         mask_np = np.array(full_mask)
+
+         # Keep only the pixels of the original image inside the mask.
+         mask_bool = mask_np > 0
+         cropped_array = np.zeros_like(original_np)
+         cropped_array[mask_bool] = original_np[mask_bool]
+
+         cropped_region = Image.fromarray(cropped_array)
+
+     except Exception as e:
+         print(f"Failed to extract mask from editor layers: {e}")
+
+     return original_image, full_mask, cropped_region
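
To exercise this outside the app, a synthetic editor value can be built by hand (a sketch; the dict layout follows Gradio's ImageEditor value, and the images are stand-ins):

    from PIL import Image, ImageDraw

    background = Image.new("RGB", (256, 256), (200, 200, 200))
    layer = Image.new("RGBA", (256, 256), (0, 0, 0, 0))
    ImageDraw.Draw(layer).rectangle([64, 96, 192, 160], fill=(255, 0, 0, 255))  # simulated brush stroke

    editor_data = {"background": background, "layers": [layer], "composite": background}
    original, mask, region = process_gradio_source(editor_data)
    # mask: binary "L" image of the stroke; region: original pixels kept only inside the mask.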
+
+
+ # Get the bounding box of the largest connected region in the mask.
+ def get_bbox_from_mask(mask_image):
+     mask_array = np.array(mask_image)
+     if mask_array.ndim == 3:
+         mask_array = cv2.cvtColor(mask_array, cv2.COLOR_RGB2GRAY)
+     _, binary_mask = cv2.threshold(mask_array, 127, 255, cv2.THRESH_BINARY)
+
+     contours, _ = cv2.findContours(binary_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
+     if not contours:
+         return None
+
+     largest_contour = max(contours, key=cv2.contourArea)
+     x, y, w, h = cv2.boundingRect(largest_contour)
+     return (x, y), (x + w, y + h)
+
+
+ # Crop the image given bounding-box corner coordinates.
+ def crop_image_from_bb(original_image, top_left, bottom_right):
+     x1, y1 = map(int, top_left)
+     x2, y2 = map(int, bottom_right)
+     if not (0 <= x1 < x2 <= original_image.width and 0 <= y1 < y2 <= original_image.height):
+         raise ValueError("Invalid bounding box coordinates")
+     crop_box = (x1, y1, x2, y2)
+     cropped_image = original_image.crop(crop_box)
+     return cropped_image
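
Chained with process_gradio_source, the intended flow looks like this (a sketch over the variables from the example above):

    bbox = get_bbox_from_mask(mask)
    if bbox is not None:
        top_left, bottom_right = bbox
        patch = crop_image_from_bb(original, top_left, bottom_right)
        # patch is the tight crop around the user-drawn region.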
+
+
+ # Resize the image to the target size, zero-padding to preserve the aspect ratio.
+ def resize_img_and_pad(input_image, target_size):
+     cropped_width, cropped_height = input_image.size
+     target_width, target_height = target_size
+     scale = min(target_width / cropped_width, target_height / cropped_height)
+     new_width = int(cropped_width * scale)
+     new_height = int(cropped_height * scale)
+
+     resized_image = input_image.resize((new_width, new_height), Image.BILINEAR)
+
+     padded_image = Image.new("RGB", target_size, (0, 0, 0))
+
+     # Center the resized image on the black canvas.
+     left_padding = (target_width - new_width) // 2
+     top_padding = (target_height - new_height) // 2
+     padded_image.paste(resized_image, (left_padding, top_padding))
+
+     return padded_image
+
+
+ # Convert a PIL image to an RGB numpy array.
+ def pil_to_np(pil_img):
+     img = np.array(pil_img.convert('RGB'))
+     return img
+
+
+ # Round a length to the nearest multiple of 16, matching the VAE compression factor.
+ def nearest_multiple_of_16(ref_height):
+     lower = (ref_height // 16) * 16
+     upper = ((ref_height + 15) // 16) * 16
+     if ref_height - lower <= upper - ref_height:
+         return lower
+     else:
+         return upper
+
+
+ # Resize the reference image to build the reference context strip.
+ def generate_context_reference_image(reference_image_pil, img_width=512):
+     reference_image_rgb = pil_to_np(reference_image_pil)
+     ref_height, ref_width = reference_image_rgb.shape[0], reference_image_rgb.shape[1]
+
+     # Scale to the target width, then snap the height to a multiple of 16.
+     ref_height = int((img_width / ref_width) * ref_height)
+     ref_height = nearest_multiple_of_16(ref_height)
+
+     reference_context = cv2.resize(reference_image_rgb, (img_width, ref_height))
+     reference_new_pil = Image.fromarray(reference_context)
+     return reference_new_pil
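
A worked example of the sizing arithmetic (values illustrative, image is a stand-in):

    ref = Image.new("RGB", (700, 600), "white")  # 700x600 style reference
    ctx = generate_context_reference_image(ref, img_width=512)
    print(ctx.size)  # (512, 432): int(512 / 700 * 600) = 438, snapped to the nearer multiple of 16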
utils_multilingual.py ADDED
@@ -0,0 +1,234 @@
+ """
+ Helper functions mainly for multilingual text image customization.
+ Acknowledgement: code here is heavily borrowed from TextFLUX: https://github.com/yyyyyxie/textflux.
+ """
+
+ import cv2
+ import numpy as np
+ from PIL import Image, ImageDraw, ImageFont
+
+
+ def generate_prompt(words):
+     words_str = ', '.join(f"'{word}'" for word in words)
+     prompt_template = (
+         "The pair of images highlights some white words on a black background, as well as their style on a real-world scene image. "
+         "[IMAGE1] is a template image rendering the text, with the words {words}; "
+         "[IMAGE2] shows the text content {words} naturally and correspondingly integrated into the image."
+     )
+     return prompt_template.format(words=words_str)
+
+
+ # Word-agnostic variant of the template; passed as `prompt` in run_multilingual_inference,
+ # while the word-specific prompt above goes to `prompt_2`.
+ prompt_template2 = (
+     "The pair of images highlights some white words on a black background, as well as their style on a real-world scene image. "
+     "[IMAGE1] is a template image rendering the text, with the words; "
+     "[IMAGE2] shows the text content naturally and correspondingly integrated into the image."
+ )
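
A quick check of the word-specific template (output trimmed to the relevant fragment):

    print(generate_prompt(["Hello", "World"]))
    # ... "[IMAGE1] is a template image rendering the text, with the words 'Hello', 'World'; ..."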
+
+
+ def run_multilingual_inference(model, image_input, mask_input, reference_input, texts,
+                                num_steps=30, guidance_scale=30, seed=42, num_images=1):
+     # Resize both inputs down to multiples of 32.
+     width, height = image_input.size
+     new_width = (width // 32) * 32
+     new_height = (height // 32) * 32
+     image_input = image_input.convert("RGB").resize((new_width, new_height))
+     mask_input = mask_input.convert("RGB").resize((new_width, new_height))
+
+     # Render the glyph template and stack it to the left of the scene image;
+     # the mask gets an all-black left half so only the scene side is inpainted.
+     texts = [i.strip() for i in texts.split('\n')]
+     rendered_text = render_glyph_multi(image_input, mask_input, texts)
+     combined_image = Image.fromarray(np.hstack((np.array(rendered_text), np.array(image_input))))
+     combined_mask = Image.fromarray(
+         np.hstack((np.array(Image.new("RGB", image_input.size, (0, 0, 0))), np.array(mask_input))))
+
+     prompt = generate_prompt(texts)
+     print("Final prompt:", prompt)
+
+     all_generated_images = []
+     for i in range(num_images):
+         res = model.generate(
+             image=combined_image,
+             mask_image=combined_mask,
+             ref_image=reference_input,
+             prompt=prompt_template2,
+             prompt_2=prompt,
+             scale=1.0,
+             guidance_scale=guidance_scale,
+             num_inference_steps=num_steps,
+             width=combined_image.width,
+             height=combined_image.height,
+             seed=seed + i,
+         )[0]
+         all_generated_images.append(res)
+     return all_generated_images
+
+
+ def insert_spaces(text, num_spaces):
+     """
+     Insert a fixed number of spaces between characters to widen the text during
+     rendering, e.g. insert_spaces("abc", 2) -> "a  b  c".
+     """
+     if len(text) <= 1:
+         return text
+     return (' ' * num_spaces).join(list(text))
+
+
+ def draw_glyph2(
+     font,
+     text,
+     polygon,
+     vertAng=10,
+     scale=1,
+     width=512,
+     height=512,
+     add_space=True,
+     scale_factor=2,
+     rotate_resample=Image.BICUBIC,
+     downsample_resample=Image.Resampling.LANCZOS
+ ):
+     """
+     Render tilted/curved text within the region defined by `polygon`:
+     - first upscale (supersample), then rotate, then downsample to keep quality high;
+     - dynamically adjust the font size, and whether to insert spaces between characters,
+       based on the region's shape.
+     Returns the final RGBA numpy array downsampled to the target size (height, width).
+     """
+     big_w = width * scale_factor
+     big_h = height * scale_factor
+
+     # Upscale polygon coordinates and fit a minimum-area rectangle.
+     big_polygon = polygon * scale_factor * scale
+     rect = cv2.minAreaRect(big_polygon.astype(np.float32))
+     box = cv2.boxPoints(rect)
+     box = np.intp(box)
+
+     # Normalize the rectangle angle.
+     w, h = rect[1]
+     angle = rect[2]
+     if angle < -45:
+         angle += 90
+     angle = -angle
+     if w < h:
+         angle += 90
+
+     # Decide whether the region is near-vertical and should be drawn top-to-bottom.
+     vert = False
+     if abs(angle) % 90 < vertAng or abs(90 - abs(angle) % 90) % 90 < vertAng:
+         _w = max(box[:, 0]) - min(box[:, 0])
+         _h = max(box[:, 1]) - min(box[:, 1])
+         if _h >= _w:
+             vert = True
+             angle = 0
+
+     # Create the large canvas and a temporary white image for text measurement.
+     big_img = Image.new("RGBA", (big_w, big_h), (0, 0, 0, 0))
+     tmp = Image.new("RGB", big_img.size, "white")
+     tmp_draw = ImageDraw.Draw(tmp)
+
+     _, _, _tw, _th = tmp_draw.textbbox((0, 0), text, font=font)
+     if _th == 0:
+         text_w = 0
+     else:
+         w_f, h_f = float(w), float(h)
+         text_w = min(w_f, h_f) * (_tw / _th)
+
+     if text_w <= max(w, h):
+         # Text fits: optionally pad with spaces so it fills the region.
+         if len(text) > 1 and not vert and add_space:
+             for i in range(1, 100):
+                 text_sp = insert_spaces(text, i)
+                 _, _, tw2, th2 = tmp_draw.textbbox((0, 0), text_sp, font=font)
+                 if th2 != 0:
+                     if min(w, h) * (tw2 / th2) > max(w, h):
+                         break
+             text = insert_spaces(text, i - 1)
+         font_size = min(w, h) * 0.80
+     else:
+         # Text overflows: shrink the font to fit.
+         shrink = 0.75 if vert else 0.85
+         if text_w != 0:
+             font_size = min(w, h) / (text_w / max(w, h)) * shrink
+         else:
+             font_size = min(w, h) * 0.80
+
+     new_font = font.font_variant(size=int(font_size))
+     left, top, right, bottom = new_font.getbbox(text)
+     text_width = right - left
+     text_height = bottom - top
+
+     # Create a transparent layer and draw the text centered in the rectangle.
+     layer = Image.new("RGBA", big_img.size, (0, 0, 0, 0))
+     draw_layer = ImageDraw.Draw(layer)
+     cx, cy = rect[0]
+     if not vert:
+         draw_layer.text(
+             (cx - text_width // 2, cy - text_height // 2 - top),
+             text,
+             font=new_font,
+             fill=(255, 255, 255, 255)
+         )
+     else:
+         # Vertical layout: draw one character at a time, top to bottom.
+         _w_ = max(box[:, 0]) - min(box[:, 0])
+         x_s = min(box[:, 0]) + _w_ // 2 - text_height // 2
+         y_s = min(box[:, 1])
+         for c in text:
+             draw_layer.text((x_s, y_s), c, font=new_font, fill=(255, 255, 255, 255))
+             _, _t, _, _b = new_font.getbbox(c)
+             y_s += _b
+
+     # Rotate the text layer back to the region's orientation.
+     rotated_layer = layer.rotate(
+         angle,
+         expand=True,
+         center=(cx, cy),
+         resample=rotate_resample
+     )
+
+     xo = int((big_img.width - rotated_layer.width) // 2)
+     yo = int((big_img.height - rotated_layer.height) // 2)
+     big_img.paste(rotated_layer, (xo, yo), rotated_layer)
+
+     # Downsample back to the target resolution.
+     final_img = big_img.resize((width, height), downsample_resample)
+     final_np = np.array(final_img)
+     return final_np
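
A direct call with a hand-built rectangular region (a sketch; whether the bundled font path resolves depends on the working directory):

    import numpy as np
    from PIL import ImageFont

    font = ImageFont.truetype("resources/Arial-Unicode-Regular.ttf", 40)
    polygon = np.array([[100, 200], [400, 200], [400, 280], [100, 280]])  # region corners
    rgba = draw_glyph2(font=font, text="Calligrapher", polygon=polygon, width=512, height=512)
    print(rgba.shape)  # (512, 512, 4)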
+
+
+ def render_glyph_multi(original, computed_mask, texts):
+     """
+     For each independent region in computed_mask:
+     - extract region positions via contours and sort them top-to-bottom, left-to-right;
+     - call draw_glyph2 to render the corresponding text into each region (supports tilt/curve);
+     - composite each region's rendering onto a transparent black canvas and return the result.
+     """
+     mask_np = np.array(computed_mask.convert("L"))
+     contours, _ = cv2.findContours(mask_np, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
+     regions = []
+     for cnt in contours:
+         x, y, w, h = cv2.boundingRect(cnt)
+         # Skip tiny regions that are likely noise.
+         if w * h < 50:
+             continue
+         regions.append((x, y, w, h, cnt))
+     regions = sorted(regions, key=lambda r: (r[1], r[0]))
+
+     render_img = Image.new("RGBA", original.size, (0, 0, 0, 0))
+     try:
+         base_font = ImageFont.truetype("resources/Arial-Unicode-Regular.ttf", 40)
+     except OSError:
+         base_font = ImageFont.load_default()
+
+     for i, region in enumerate(regions):
+         if i >= len(texts):
+             break
+         text = texts[i].strip()
+         if not text:
+             continue
+         cnt = region[4]
+         polygon = cnt.reshape(-1, 2)
+         rendered_np = draw_glyph2(
+             font=base_font,
+             text=text,
+             polygon=polygon,
+             vertAng=10,
+             scale=1,
+             width=original.size[0],
+             height=original.size[1],
+             add_space=True,
+             scale_factor=1,
+             rotate_resample=Image.BICUBIC,
+             downsample_resample=Image.Resampling.LANCZOS
+         )
+         rendered_img = Image.fromarray(rendered_np, mode="RGBA")
+         render_img = Image.alpha_composite(render_img, rendered_img)
+     return render_img.convert("RGB")
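
Used on its own, the renderer produces the glyph template that run_multilingual_inference later stacks to the left of the scene (a sketch with placeholder file paths):

    scene = Image.open("scene.jpg").convert("RGB")
    mask = Image.open("mask.png")  # one white blob per line of text
    template = render_glyph_multi(scene, mask, ["Hello", "世界"])
    template.save("glyph_template.png")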